repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ilexius/odoo | addons/im_livechat/report/im_livechat_report_channel.py | 7 | 3048 | # -*- coding: utf-8 -*-
from openerp import fields, models, tools
class ImLivechatReportChannel(models.Model):
""" Livechat Support Report on the Channels """
_name = "im_livechat.report.channel"
_description = "Livechat Support Report"
_order = 'start_date, technical_name'
_auto = False
uuid = fields.Char('UUID', readonly=True)
channel_id = fields.Many2one('mail.channel', 'Conversation', readonly=True)
channel_name = fields.Char('Channel Name', readonly=True)
technical_name = fields.Char('Code', readonly=True)
livechat_channel_id = fields.Many2one('im_livechat.channel', 'Channel', readonly=True)
start_date = fields.Datetime('Start Date of session', readonly=True, help="Start date of the conversation")
start_date_hour = fields.Char('Hour of start Date of session', readonly=True)
duration = fields.Float('Average duration', digits=(16, 2), readonly=True, group_operator="avg", help="Duration of the conversation (in seconds)")
nbr_speaker = fields.Integer('# of speakers', readonly=True, group_operator="avg", help="Number of different speakers")
nbr_message = fields.Integer('Average message', readonly=True, group_operator="avg", help="Number of messages in the conversation")
partner_id = fields.Many2one('res.partner', 'Operator', readonly=True)
def init(self, cr):
# Note: start_date_hour must be removed once read_group allows grouping on the hour of a datetime. Don't forget to change the view!
tools.drop_view_if_exists(cr, 'im_livechat_report_channel')
cr.execute("""
CREATE OR REPLACE VIEW im_livechat_report_channel AS (
SELECT
C.id as id,
C.uuid as uuid,
C.id as channel_id,
C.name as channel_name,
CONCAT(L.name, ' / ', C.id) as technical_name,
C.livechat_channel_id as livechat_channel_id,
C.create_date as start_date,
to_char(date_trunc('hour', C.create_date), 'YYYY-MM-DD HH24:MI:SS') as start_date_hour,
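-- duration: seconds between the channel creation date and its most recent message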
EXTRACT('epoch' FROM (max((SELECT (max(M.create_date)) FROM mail_message M JOIN mail_message_mail_channel_rel R ON (R.mail_message_id = M.id) WHERE R.mail_channel_id = C.id))-C.create_date)) as duration,
count(distinct P.id) as nbr_speaker,
count(distinct M.id) as nbr_message,
MAX(S.partner_id) as partner_id
FROM mail_channel C
JOIN mail_message_mail_channel_rel R ON (C.id = R.mail_channel_id)
JOIN mail_message M ON (M.id = R.mail_message_id)
JOIN mail_channel_partner S ON (S.channel_id = C.id)
JOIN im_livechat_channel L ON (L.id = C.livechat_channel_id)
LEFT JOIN res_partner P ON (M.author_id = P.id)
GROUP BY C.id, C.name, C.livechat_channel_id, L.name, C.create_date, C.uuid
)
""")
| gpl-3.0 |
njantrania/osf.io | tasks.py | 2 | 25686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
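# Illustrative usage (the virtualenv path below is an assumption, not part of this repo):
#   bin_prefix('pip install -r requirements.txt')
#   -> '/opt/osf/env/bin/pip install -r requirements.txt'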
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
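# rednose is an optional nose plugin that colours test output when it is installed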
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
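# Illustrative output (one 'name: repr' line per context entry; values are made up):
#   app: <Flask 'website.app'>
#   settings: <module 'website.settings' from '...'>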
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given by its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
`invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug", hostname=None, beat=False):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(level="debug", schedule=None):
"""Run the Celery process."""
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.tasks -l {0}'.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
run(bin_prefix(cmd), pty=True)
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
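# Illustrative result (paths are assumptions):
#   /opt/osf/env/bin/pip install --exists-action w --upgrade -r requirements/dev.txt --no-index --find-links=/opt/wheelhouse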
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
inv requirements --metrics
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics: # then metrics requirements
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_api():
"""Run the API test suite."""
test_module(module="api_tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
test_api()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_osf():
"""
Run half of the tests to help travis go faster
"""
flake()
jshint()
test_osf()
@task
def test_travis_else():
"""
Run the other half of the tests to help travis go faster
"""
test_addons()
test_api()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default to
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
inv wheelhouse --metrics
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics:
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
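# e.g. a latest tag of '0.45.2' gives next_patch_version '0.45.3' (illustrative values)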
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
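# `git describe --dirty --tags --long --abbrev=40` prints something like
# 'v0.45.2-12-g<40-hex-sha>[-dirty]' (illustrative); latest_tag_info() below consumes the
# '-'-separated pieces from the end of that list.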
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
except subprocess.CalledProcessError as err:
raise err
# logger.warn("Error when running git describe")
return {}
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def build_js_config_files():
from website import settings
from website.app import build_js_config_files as _build_js_config_files
print('Building JS config files...')
_build_js_config_files(settings)
print("...Done.")
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
build_js_config_files()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
| apache-2.0 |
Adel-Magebinary/odoo | addons/l10n_ch/__openerp__.py | 260 | 2575 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Switzerland - Accounting',
'description': """
Swiss localization
==================
**Multilang Swiss PME/KMU 2015 account chart and taxes**
**Author:** Camptocamp SA
**Financial contributors:** Prisme Solutions Informatique SA, Quod SA
**Translation contributors:** brain-tec AG, Agile Business Group
The Swiss localization addons are organized this way:
``l10n_ch``
Multilang Swiss PME/KMU 2015 account chart and taxes (official addon)
``l10n_ch_base_bank``
Technical module that introduces a new and simplified version of bank
type management
``l10n_ch_bank``
List of Swiss banks
``l10n_ch_zip``
List of Swiss postal zip codes
``l10n_ch_dta``
Support of the DTA payment protocol (will be deprecated by the end of 2014)
``l10n_ch_payment_slip``
Support of ESR/BVR payment slip report and reconciliation.
``l10n_ch`` is located in the core Odoo modules. The other modules are in:
https://github.com/OCA/l10n-switzerland
""",
'version': '8.0',
'author': 'Camptocamp',
'category': 'Localization/Account Charts',
'website': 'http://www.camptocamp.com',
'depends': ['account', 'l10n_multilang'],
'data': ['report/balance_sheet.xml',
'report/profit_and_loss.xml',
'chart/account.xml',
'chart/vat2011.xml',
'chart/fiscal_position.xml',
],
'demo': [],
'test': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rmfitzpatrick/ansible | lib/ansible/modules/commands/shell.py | 44 | 5468 | # Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# There is no actual shell module source; when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
- The C(shell) module takes the command name followed by a list of space-delimited arguments.
It is almost exactly like the M(command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
- For Windows targets, use the M(win_shell) module instead.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string. There's not an actual
option named "free form". See the examples!
required: true
default: null
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
required: false
default: null
version_added: "0.6"
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
version_added: "1.8"
stdin:
version_added: "2.4"
description:
- Set the stdin of the command directly to the specified value.
required: false
default: null
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless the C(shell)
module is explicitly required. When running ad-hoc commands, use your best
judgement.
- To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
- For Windows targets, use the M(win_shell) module instead.
requirements: [ ]
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
- name: Execute the command in remote shell; stdout goes to the specified file on the remote.
shell: somescript.sh >> somelog.txt
- name: Change the working directory to somedir/ before executing the command.
shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
# You can also use the 'args' form to provide the options.
- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist.
shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
shell: cat < /tmp/*txt
args:
executable: /bin/bash
- name: Run a command using a templated variable (always use quote filter to avoid injection)
shell: cat {{ myfile|quote }}
# You can use shell to run other executables to perform actions inline
- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
shell: |
set timeout 300
spawn ssh admin@{{ cimc_host }}
expect "password:"
send "{{ cimc_password }}\\n"
expect "\\n{{ cimc_name }}"
send "connect host\\n"
expect "pxeboot.n12"
send "\\n"
exit 0
args:
executable: /usr/bin/expect
delegate_to: localhost
'''
RETURN = '''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
chenhengjie123/crosswalk-web-driver | xwalkdriver/embed_version_in_cpp.py | 3 | 1108 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds Xwalk user data files in C++ code."""
import optparse
import os
import sys
import xwalk_paths
import cpp_source
sys.path.insert(0, os.path.join(xwalk_paths.GetSrc(), 'build', 'util'))
import lastchange
def main():
parser = optparse.OptionParser()
parser.add_option('', '--version-file')
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h file should be created')
options, args = parser.parse_args()
version = open(options.version_file, 'r').read().strip()
revision = lastchange.FetchVersionInfo(None).revision
if revision:
version += '.' + revision.strip()
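# e.g. a version of '1.2' and a revision of '12345' yield '1.2.12345' (illustrative values)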
global_string_map = {
'kXwalkDriverVersion': version
}
cpp_source.WriteSource('version',
'xwalk/test/xwalkdriver',
options.directory, global_string_map)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
magicwang-cn/kubernetes | examples/selenium/selenium-test.py | 173 | 1109 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def check_browser(browser):
driver = webdriver.Remote(
command_executor='http://selenium-hub:4444/wd/hub',
desired_capabilities=getattr(DesiredCapabilities, browser)
)
driver.get("http://google.com")
assert "google" in driver.page_source
driver.close()
print("Browser %s checks out!" % browser)
check_browser("FIREFOX")
check_browser("CHROME")
| apache-2.0 |
nschmidtALICE/AliPhysics | PWGPP/Alignment/utils/readMilleBinary.py | 41 | 4281 | #!/usr/bin/python
## \file
# Read millepede binary file and print records
#
# Hardcoded defaults can be replaced by command line arguments for
# - Name of binary file
# - Number of records to print
# - Number of records to skip (optional)
#
# Description of the output from readMilleBinary.py
# - Records (tracks) start with \c '===' followed by record number and length
# (<0 for binary files containing doubles)
# - Measurements: A measurement with global derivatives is called a 'global measurement',
# otherwise 'local measurement'. Usually the real measurements from the detectors are 'global'
# ones and virtual measurements e.g. to describe multiple scattering are 'local'.
# - 'Global' measurements start with \c '-g-' followed by measurement number, first global label,
# number of local and global derivatives, measurement value and error. The next lines contain
# local and global labels (array('i')) and derivatives (array('f') or array('d')).
# - 'Local' measurements start with \c '-l-' followed by measurement number, first local label,
# number of local and global derivatives, measurement value and error. The next lines contain
# local labels (array('i')) and derivatives (array('f') or array('d')).
#
# Tested with SL4, SL5, SL6
import array, sys
# ############### read millepede binary file #################
#
## Binary file type (C or Fortran)
Cfiles = 1 # Cfiles
#Cfiles = 0 # Fortran files
#
## Integer format
intfmt = 'i' # SL5, gcc-4
#intfmt = 'l' # SL4, gcc-3
#
## Binary file name
fname = "milleBinaryISN.dat"
#
## number of records (tracks) to show
mrec = 10
## number of records (track) to skip before
skiprec = 0
#
# ## C. Kleinwort - DESY ########################
# ## use command line arguments ?
narg = len(sys.argv)
if narg > 1:
if narg < 3:
print " usage: readMilleBinary.py <file name> <number of records> [<number of records to skip>]"
sys.exit(2)
else:
fname = sys.argv[1]
mrec = int(sys.argv[2])
if narg > 3:
skiprec = int(sys.argv[3])
#print " input ", fname, mrec, skiprec
f = open(fname, "rb")
nrec = 0
try:
while (nrec < mrec + skiprec):
# read 1 record
if (Cfiles == 0):
lenf = array.array(intfmt)
lenf.fromfile(f, 2)
length = array.array(intfmt)
length.fromfile(f, 1)
nr = abs(length[0] / 2)
nrec += 1
if length[0] > 0:
glder = array.array('f')
else:
glder = array.array('d')
glder.fromfile(f, nr)
inder = array.array(intfmt)
inder.fromfile(f, nr)
if (Cfiles == 0):
lenf = array.array(intfmt)
lenf.fromfile(f, 2)
if (nrec <= skiprec): # must be after last fromfile
continue
print " === NR ", nrec, length[0] / 2
i = 0
nh = 0
ja = 0
jb = 0
jsp = 0
nsp = 0
while (i < (nr - 1)):
i += 1
while (i < nr) and (inder[i] != 0): i += 1
ja = i
i += 1
while (i < nr) and (inder[i] != 0): i += 1
jb = i
i += 1
while (i < nr) and (inder[i] != 0): i += 1
i -= 1
# special data ?
if (ja + 1 == jb) and (glder[jb] < 0.):
jsp = jb
nsp = int(-glder[jb])
i += nsp
print ' ### spec. ', nsp, inder[jsp + 1:i + 1], glder[jsp + 1:i + 1]
continue
nh += 1
if (jb < i):
# measurement with global derivatives
print ' -g- meas. ', nh, inder[jb + 1], jb - ja - 1, i - jb, glder[ja], glder[jb]
else:
# measurement without global derivatives
print ' -l- meas. ', nh, inder[ja + 1], jb - ja - 1, i - jb, glder[ja], glder[jb]
if (ja + 1 < jb):
print " local ", inder[ja + 1:jb]
print " local ", glder[ja + 1:jb]
if (jb + 1 < i + 1):
print " global ", inder[jb + 1:i + 1]
print " global ", glder[jb + 1:i + 1]
except EOFError:
pass
# print "end of file"
f.close()
| bsd-3-clause |
intgr/django | tests/template_tests/syntax_tests/i18n/test_get_language_info_list.py | 55 | 2028 | from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class GetLanguageInfoListTests(SimpleTestCase):
libraries = {
'custom': 'template_tests.templatetags.custom',
'i18n': 'django.templatetags.i18n',
}
@setup({'i18n30': '{% load i18n %}'
'{% get_language_info_list for langcodes as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n30(self):
output = self.engine.render_to_string('i18n30', {'langcodes': ['it', 'no']})
self.assertEqual(output, 'it: Italian/italiano bidi=False; no: Norwegian/norsk bidi=False; ')
@setup({'i18n31': '{% load i18n %}'
'{% get_language_info_list for langcodes as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n31(self):
output = self.engine.render_to_string('i18n31', {'langcodes': (('sl', 'Slovenian'), ('fa', 'Persian'))})
self.assertEqual(
output,
'sl: Slovenian/Sloven\u0161\u010dina bidi=False; '
'fa: Persian/\u0641\u0627\u0631\u0633\u06cc bidi=True; '
)
@setup({'i18n38_2': '{% load i18n custom %}'
'{% get_language_info_list for langcodes|noop:"x y" as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }}/{{ l.name_translated }} '
'bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n38_2(self):
with translation.override('cs'):
output = self.engine.render_to_string('i18n38_2', {'langcodes': ['it', 'fr']})
self.assertEqual(
output,
'it: Italian/italiano/italsky bidi=False; '
'fr: French/français/francouzsky bidi=False; '
)
| bsd-3-clause |
Asilva4700/Aljos | Aljos/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | 3426 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor =='win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
| gpl-3.0 |
twilio/twilio-python | tests/integration/messaging/v1/test_brand_registration.py | 1 | 5343 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BrandRegistrationTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations/BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"a2p_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"date_created": "2021-01-27T14:18:35Z",
"date_updated": "2021-01-27T14:18:36Z",
"status": "PENDING",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85"
}
'''
))
actual = self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations.list()
self.holodeck.assert_has_request(Request(
'get',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "data",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations?PageSize=50&Page=0"
},
"data": [
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"a2p_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"date_created": "2021-01-27T14:18:35Z",
"date_updated": "2021-01-27T14:18:36Z",
"status": "APPROVED",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85"
}
]
}
'''
))
actual = self.client.messaging.v1.brand_registrations.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations.create(customer_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", a2p_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {
'CustomerProfileBundleSid': "BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
'A2PProfileBundleSid': "BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
}
self.holodeck.assert_has_request(Request(
'post',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU0000009f7e067e279523808d267e2d90",
"a2p_profile_bundle_sid": "BU1111109f7e067e279523808d267e2d85",
"date_created": "2021-01-28T10:45:51Z",
"date_updated": "2021-01-28T10:45:51Z",
"status": "PENDING",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85"
}
'''
))
actual = self.client.messaging.v1.brand_registrations.create(customer_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", a2p_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
| mit |
PressLabs/cobalt | tests/unit/engine/conftest.py | 1 | 1900 | # Copyright 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from etcd import Lock
from pytest import fixture, mark
from engine import Engine, Lease, Executor
@fixture
@mark.usefixtures('p_create_leaser', 'p_create_lock', 'p_create_executor')
def engine(m_etcd_client, volume_manager, machine_manager):
return Engine(m_etcd_client, volume_manager, machine_manager,
{'leaser': {'lease_ttl': 0, 'refresh_ttl': 0}, 'executor': {'timeout': 10}})
@fixture
def m_lock(mocker):
return mocker.MagicMock(spec=Lock)
@fixture
def m_lease(mocker):
return mocker.MagicMock(spec=Lease)
@fixture
def m_executor(mocker):
return mocker.MagicMock(spec=Executor)
@fixture
def executor(volume_manager, machine_manager):
return Executor(volume_manager, machine_manager, {'timeout': 0})
@fixture
def p_create_executor(mocker, engine):
return mocker.patch.object(engine, '_create_executor')
@fixture
def p_engine_executor_timeout(mocker, engine):
return mocker.patch.object(engine.executor, 'timeout')
@fixture
def p_engine_executor_reset(mocker, engine):
return mocker.patch.object(engine.executor, 'reset')
@fixture
def p_executor_process(mocker, executor):
return mocker.patch.object(executor, '_process')
@fixture
def p_executor_active_machines(mocker, executor):
return mocker.patch.object(executor, 'get_active_machines')
| apache-2.0 |
prutseltje/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py | 15 | 5852 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_group
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower group.
description:
- Create, update, or destroy Ansible Tower groups. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the group.
required: True
description:
description:
- The description to use for the group.
inventory:
description:
- Inventory the group should be made a member of.
required: True
variables:
description:
- Variables to use for the group, use C(@) for a file.
credential:
description:
- Credential to use for the group.
source:
description:
- The source to use for this group.
choices: ["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6" , "cloudforms", "custom"]
source_regions:
description:
- Regions for cloud provider.
source_vars:
description:
- Override variables from source with variables from this field.
instance_filters:
description:
- Comma-separated list of filter expressions for matching hosts.
group_by:
description:
- Limit groups automatically created from inventory source.
source_script:
description:
- Inventory script to be used when group type is C(custom).
overwrite:
description:
- Delete child groups and hosts not found in source.
type: bool
default: 'no'
overwrite_vars:
description:
- Override vars in child groups and hosts with those from external source.
update_on_launch:
description:
- Refresh inventory data from its source each time a job is run.
type: bool
default: 'no'
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower group
tower_group:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
import os
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(),
inventory=dict(required=True),
variables=dict(),
credential=dict(),
source=dict(choices=["manual", "file", "ec2", "rax", "vmware",
"gce", "azure", "azure_rm", "openstack",
"satellite6", "cloudforms", "custom"], default="manual"),
source_regions=dict(),
source_vars=dict(),
instance_filters=dict(),
group_by=dict(),
source_script=dict(),
overwrite=dict(type='bool', default=False),
overwrite_vars=dict(),
update_on_launch=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
inventory = module.params.get('inventory')
credential = module.params.get('credential')
state = module.params.get('state')
variables = module.params.get('variables')
if variables:
if variables.startswith('@'):
filename = os.path.expanduser(variables[1:])
with open(filename, 'r') as f:
variables = f.read()
json_output = {'group': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
group = tower_cli.get_resource('group')
try:
params = module.params.copy()
params['create_on_missing'] = True
params['variables'] = variables
inv_res = tower_cli.get_resource('inventory')
inv = inv_res.get(name=inventory)
params['inventory'] = inv['id']
if credential:
cred_res = tower_cli.get_resource('credential')
cred = cred_res.get(name=credential)
params['credential'] = cred['id']
if state == 'present':
result = group.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = group.delete(**params)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update the group, inventory not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update the group: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
mdhaman/superdesk-core | apps/io/feeding_services/reuters.py | 1 | 11746 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import datetime
import traceback
import superdesk
import requests
from flask import current_app as app
from superdesk.errors import IngestApiError
from superdesk.etree import etree, ParseError
from superdesk.io.registry import register_feeding_service, register_feeding_service_parser
from superdesk.io.feeding_services.http_service import HTTPFeedingService
from superdesk.logging import logger
from superdesk.utc import utcnow
from urllib.parse import urlparse, urlunparse
from flask_babel import _
requests.packages.urllib3.disable_warnings()
class ReutersHTTPFeedingService(HTTPFeedingService):
"""
Feeding Service class which can read article(s) using HTTP provided by Reuters.
"""
NAME = 'reuters_http'
ERRORS = [IngestApiError.apiTimeoutError().get_error_description(),
IngestApiError.apiRedirectError().get_error_description(),
IngestApiError.apiRequestError().get_error_description(),
IngestApiError.apiUnicodeError().get_error_description(),
IngestApiError.apiParseError().get_error_description(),
IngestApiError.apiGeneralError().get_error_description()]
DATE_FORMAT = '%Y.%m.%d.%H.%M'
label = 'Reuters feed API'
fields = [
{
'id': 'url', 'type': 'text', 'label': 'Feed URL',
'placeholder': 'Feed URL', 'required': True,
'default': 'http://rmb.reuters.com/rmd/rest/xml'
},
{
'id': 'auth_url', 'type': 'text', 'label': 'URL for Authentication',
'placeholder': 'authentication url', 'required': True,
'default': 'https://commerce.reuters.com/rmd/rest/xml/login'
},
{
'id': 'username', 'type': 'text', 'label': 'Username',
'placeholder': 'Username', 'required': True
},
{
'id': 'password', 'type': 'password', 'label': 'Password',
'placeholder': 'Password', 'required': True
}
]
session = None
def _update(self, provider, update):
updated = utcnow()
last_updated = provider.get('last_updated')
ttl_minutes = app.config['INGEST_EXPIRY_MINUTES']
if not last_updated or last_updated < updated - datetime.timedelta(minutes=ttl_minutes):
last_updated = updated - datetime.timedelta(minutes=ttl_minutes)
self.provider = provider
provider_config = provider.get('config')
if not provider_config:
provider_config = {}
provider['config'] = provider_config
provider_config.setdefault('url', 'http://rmb.reuters.com/rmd/rest/xml')
provider_config.setdefault('auth_url', 'https://commerce.reuters.com/rmd/rest/xml/login')
self.URL = provider_config.get('url')
for channel in self._get_channels():
ids = self._get_article_ids(channel, last_updated, updated)
for id in ids:
try:
items = self.fetch_ingest(id)
if items:
yield items
                # if there was an exception processing one of the bunch, log it and continue
except Exception as ex:
logger.warn('Reuters item {} has not been retrieved'.format(id))
logger.exception(ex)
def _get_channels(self):
"""Get subscribed channels."""
channels = []
tree = self._get_tree('channels')
for channel in tree.findall('channelInformation'):
channels.append(channel.find('alias').text)
return channels
def _get_tree(self, endpoint, payload=None):
"""Get xml response for given API endpoint and payload.
        :param endpoint: API endpoint
        :type endpoint: str
        :param payload: query parameters to send with the request
        :type payload: dict
"""
if payload is None:
payload = {}
payload['token'] = self._get_auth_token(self.provider, update=True)
url = self._get_absolute_url(endpoint)
if not self.session:
self.session = requests.Session()
retries = 0
while True:
try:
response = self.session.get(url, params=payload, timeout=(30, 15))
except requests.exceptions.Timeout as ex:
if retries < 3:
logger.warn('Reuters API timeout retrying, retries {}'.format(retries))
retries += 1
continue
raise IngestApiError.apiTimeoutError(ex, self.provider)
except requests.exceptions.TooManyRedirects as ex:
# Tell the user their URL was bad and try a different one
raise IngestApiError.apiRedirectError(ex, self.provider)
except requests.exceptions.RequestException as ex:
# catastrophic error. bail.
raise IngestApiError.apiRequestError(ex, self.provider)
except Exception as error:
traceback.print_exc()
raise IngestApiError.apiGeneralError(error, self.provider)
if response.status_code == 404:
raise LookupError(_('Not found {payload}').format(payload=payload))
break
try:
return etree.fromstring(response.content) # workaround for http mock lib
except UnicodeEncodeError as error:
traceback.print_exc()
raise IngestApiError.apiUnicodeError(error, self.provider)
except ParseError as error:
traceback.print_exc()
raise IngestApiError.apiParseError(error, self.provider)
except Exception as error:
traceback.print_exc()
raise IngestApiError.apiGeneralError(error, self.provider)
def _get_absolute_url(self, endpoint):
"""
Get absolute URL for given endpoint.
        :param endpoint: API endpoint
        :type endpoint: str
"""
return '/'.join([self.URL, endpoint])
def _get_article_ids(self, channel, last_updated, updated):
"""
        Get article ids which should be upserted; also save the poll token that is returned.
"""
ids = set()
payload = {'channel': channel, 'fieldsRef': 'id'}
        # check if the channel has a pollToken; if not, fall back to dateRange
last_poll_token = self._get_poll_token(channel)
if last_poll_token is not None:
logger.info("Reuters requesting channel {} with poll token {}".format(channel, last_poll_token))
payload['pollToken'] = last_poll_token
else:
payload['dateRange'] = "%s-%s" % (self._format_date(last_updated), self._format_date(updated))
logger.info("Reuters requesting channel {} with dateRange {}".format(channel, payload['dateRange']))
tree = self._get_tree('items', payload)
status_code = tree.find('status').get('code') if tree.tag == 'results' else tree.get('code')
# check the returned status
if status_code != '10':
logger.warn("Reuters channel request returned status code {}".format(status_code))
# status code 30 indicates failure
if status_code == '30':
# invalid token
logger.warn("Reuters error on channel {} code {} {}".format(channel, tree.find('error').get('code'),
tree.find('error').text))
if tree.find('error').get('code') == '2100':
self._save_poll_token(channel, None)
logger.warn("Reuters channel invalid token reseting {}".format(status_code))
return ids
# extract the returned poll token if there is one
poll_token = tree.find('pollToken')
if poll_token is not None:
            # a new token indicates new content
if poll_token.text != last_poll_token:
logger.info("Reuters channel {} new token {}".format(channel, poll_token.text))
self._save_poll_token(channel, poll_token.text)
else:
# the token has not changed, so nothing new
logger.info("Reuters channel {} nothing new".format(channel))
return ids
else:
logger.info("Reuters channel {} retrieved no token".format(channel))
return ids
for result in tree.findall('result'):
id = result.find('id').text
ids.add(id)
logger.info("Reuters id : {}".format(id))
return ids
def _save_poll_token(self, channel, poll_token):
"""Saves the poll token for the passed channel in the config section of the
:param channel:
:param poll_token:
:return:
"""
# get the provider in case it has been updated by another channel
ingest_provider_service = superdesk.get_resource_service('ingest_providers')
provider = ingest_provider_service.find_one(req=None, _id=self.provider[superdesk.config.ID_FIELD])
provider_token = provider.get('tokens')
if 'poll_tokens' not in provider_token:
provider_token['poll_tokens'] = {channel: poll_token}
else:
provider_token['poll_tokens'][channel] = poll_token
upd_provider = {'tokens': provider_token}
ingest_provider_service.system_update(self.provider[superdesk.config.ID_FIELD], upd_provider, self.provider)
def _get_poll_token(self, channel):
"""Get the poll token from provider config if it is available.
:param channel:
:return: token
"""
if 'tokens' in self.provider and 'poll_tokens' in self.provider['tokens']:
return self.provider.get('tokens').get('poll_tokens').get(channel, None)
def _format_date(self, date):
return date.strftime(self.DATE_FORMAT)
def fetch_ingest(self, id):
items = self._parse_items(id)
result_items = []
while items:
item = items.pop()
self.add_timestamps(item)
try:
items.extend(self._fetch_items_in_package(item))
result_items.append(item)
except LookupError as err:
self.log_item_error(err, item, self.provider)
return []
return result_items
def _parse_items(self, id):
"""
Parse item message and return given items.
"""
payload = {'id': id}
tree = self._get_tree('item', payload)
parser = self.get_feed_parser(self.provider, tree)
items = parser.parse(tree, self.provider)
return items
def _fetch_items_in_package(self, item):
"""
Fetch remote assets for given item.
"""
items = []
for group in item.get('groups', []):
for ref in group.get('refs', []):
if 'residRef' in ref:
items.extend(self._parse_items(ref.get('residRef')))
return items
def prepare_href(self, href, mimetype=None):
(scheme, netloc, path, params, query, fragment) = urlparse(href)
new_href = urlunparse((scheme, netloc, path, '', '', ''))
return '%s?auth_token=%s' % (new_href, self._get_auth_token(self.provider, update=True))
register_feeding_service(ReutersHTTPFeedingService)
register_feeding_service_parser(ReutersHTTPFeedingService.NAME, 'newsml2')
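# Editorial sketch, not part of the Reuters feeding service above: it restates,
# under the assumption of plain values instead of provider objects, how
# _get_article_ids chooses between a saved pollToken and a dateRange payload
# before requesting the 'items' endpoint. The helper name is hypothetical.
def _sketch_items_payload(channel, last_poll_token, last_updated, updated,
                          date_format='%Y.%m.%d.%H.%M'):
    """Mirror of the payload selection logic in _get_article_ids."""
    payload = {'channel': channel, 'fieldsRef': 'id'}
    if last_poll_token is not None:
        # resume from the token returned by the previous poll
        payload['pollToken'] = last_poll_token
    else:
        # no saved token (first poll or token reset): fall back to a date range
        payload['dateRange'] = '%s-%s' % (last_updated.strftime(date_format),
                                          updated.strftime(date_format))
    return payload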
| agpl-3.0 |
izv/IzVerifier | IzVerifier/izspecs/containers/izclasses.py | 1 | 4268 | import re
from IzVerifier.izspecs.containers.izcontainer import IzContainer
from IzVerifier.izspecs.containers.constants import *
from os import walk
__author__ = 'fcanas'
class IzClasses(IzContainer):
"""
Container for parsing and storing custom classes used in izpack installers.
"""
properties = {
NAME: "classes",
DEFINITION_SPEC_FILES: [],
REFERENCE_SPEC_FILES: [
"install",
"userInputSpec",
"ProcessPanel.Spec",
"core-packs"],
ATTRIBUTES: ['class', 'name', 'classname', 'installer'],
SPEC_ELEMENT: '',
PARENT_OPENING_TAG: '',
PARENT_CLOSING_TAG: '',
WHITE_LIST: [],
PATTERNS: [],
READ_PATTERNS: [],
WRITE_PATTERNS: [],
WHITE_LIST_PATTERNS: ['^com.izforge.izpack.*$']
}
def __init__(self, path=None):
"""
Initializes the container from the path to the root of custom source code.
Note: does not make use of parent class __init__.
"""
self.container = {}
self.referenced = {}
if path:
self.parse(path)
def parse(self, root):
"""
Izclasses are not pre-defined anywhere. All we can do is make a collection of
all custom classes used in source code and index that.
"""
for paths, dirs, files in walk(root):
for f in files:
if '.java' in f:
path = paths + '/' + f
name = self.path_to_id(root, path)
self.container[name] = path
def get_keys(self):
"""
Returns a set of all the keys for existing custom classes.
"""
return set(self.container.keys()) | set(self.properties[WHITE_LIST])
def count(self):
"""
        Return the number of custom classes found in the source tree.
"""
return len(self.container.keys())
def print_keys(self):
"""
        Prints all of the class keys found in the source tree.
"""
for key in self.container.keys():
print key
def get_spec_elements(self):
"""
        Returns a set of the values stored for each class (source file paths).
"""
return set(self.container.values())
def to_string(self):
return str(self.container)
def has_reference(self, element):
"""
        Return true if the given element contains an izpack class reference.
"""
def is_izpack_class(classname):
"""
Determines if this references an izpack built-in class.
"""
if type(classname) is list:
classname = classname[0]
return not '.' in classname
if element.has_attr('name') and element.name == 'executeclass':
return not is_izpack_class(element['name'])
if element.has_attr('class'):
return not is_izpack_class(element['class'])
if element.has_attr('classname'):
return not is_izpack_class(element['classname'])
if element.has_attr('installer') and element.name == 'listener':
return not is_izpack_class(element['installer'])
return False
def has_definition(self, element):
"""
Custom izpack classes are not defined by xml descriptors, so this method is unused.
"""
pass
@staticmethod
def path_to_id(root, path):
"""
Transforms a classpath to a class id.
"""
path = path.replace(root, '').replace('/', '.')
path = re.sub('\.java$', '', path)
return path
@staticmethod
def element_sort_key(element):
"""
Returns the key to use when sorting elements of this container.
"""
return element['class'].lower()
@staticmethod
def get_identifier(element):
"""
Returns the identifying value for this element.
"""
return element['class']
@staticmethod
def get_value(element):
"""
Returns the main 'value' for this element.
"""
return element
@staticmethod
def ref_transformer(ref):
"""
        Wraps the ref in a list so callers can iterate over it uniformly.
"""
return [ref]
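# Editorial sketch, not part of IzVerifier: a minimal illustration of how
# IzClasses.path_to_id maps a source file path to a class id, assuming a
# hypothetical project layout rooted at '/project/src/'.
def _sketch_path_to_id_demo():
    root = '/project/src/'
    path = '/project/src/com/acme/Installer.java'
    # '/project/src/com/acme/Installer.java' -> 'com.acme.Installer'
    return IzClasses.path_to_id(root, path)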
| mit |
NonnEmilia/OpenGenfri | pos/pos/settings.py | 2 | 3093 | """
Django settings for pos project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Ignore the following error when using ipython:
# /usr/local/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py:63:
# RuntimeWarning: SQLite received a naive datetime (2014-12-19 16:46:59.585773)
# while time zone support is active.
import warnings
import exceptions
warnings.filterwarnings("ignore", category=exceptions.RuntimeWarning,
module='django.db.backends.sqlite3.base', lineno=63)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'at=5ht_msawx#8jns_#fu!)x6c7n*0x5+2&(&mmq^o)!#-#h^n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpos',
'easy_pdf',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pos.urls'
WSGI_APPLICATION = 'pos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# 'ATOMIC_REQUESTS': True,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'it-IT'
TIME_ZONE = 'Europe/Rome'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
#LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': 'everything.log',
# },
# },
# 'loggers': {
# 'django.request': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
#}
STATIC_ROOT = BASE_DIR + '/static/'
| mit |
htwenhe/DJOA | env/Lib/site-packages/django/utils/version.py | 344 | 2445 | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_main_version(version=None):
"Returns main version (X.Y[.Z]) from VERSION."
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
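# Editorial sketch, not part of Django: example outputs of get_version for a
# couple of hand-picked VERSION tuples, to make the version mapping concrete.
# The helper name is hypothetical and the function is never called on import.
def _sketch_get_version_examples():
    # final release with a non-zero micro version -> 'X.Y.Z'
    assert get_version((1, 8, 2, 'final', 0)) == '1.8.2'
    # beta pre-release with micro == 0 -> 'X.YbN'
    assert get_version((1, 9, 0, 'beta', 1)) == '1.9b1'
    # an 'alpha' 0 release would instead append '.dev<git timestamp>' when available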
| mit |
zanbel/david | tests/test_vocabcompiler.py | 38 | 5213 | #!/usr/bin/env python2
# -*- coding: utf-8-*-
import unittest
import tempfile
import contextlib
import logging
import shutil
import mock
from client import vocabcompiler
class TestVocabCompiler(unittest.TestCase):
def testPhraseExtraction(self):
expected_phrases = ['MOCK']
mock_module = mock.Mock()
mock_module.WORDS = ['MOCK']
with mock.patch('client.brain.Brain.get_modules',
classmethod(lambda cls: [mock_module])):
extracted_phrases = vocabcompiler.get_all_phrases()
self.assertEqual(expected_phrases, extracted_phrases)
def testKeywordPhraseExtraction(self):
expected_phrases = ['MOCK']
with tempfile.TemporaryFile() as f:
# We can't use mock_open here, because it doesn't seem to work
# with the 'for line in f' syntax
f.write("MOCK\n")
f.seek(0)
with mock.patch('%s.open' % vocabcompiler.__name__,
return_value=f, create=True):
extracted_phrases = vocabcompiler.get_keyword_phrases()
self.assertEqual(expected_phrases, extracted_phrases)
class TestVocabulary(unittest.TestCase):
VOCABULARY = vocabcompiler.DummyVocabulary
@contextlib.contextmanager
def do_in_tempdir(self):
tempdir = tempfile.mkdtemp()
yield tempdir
shutil.rmtree(tempdir)
def testVocabulary(self):
phrases = ['GOOD BAD UGLY']
with self.do_in_tempdir() as tempdir:
self.vocab = self.VOCABULARY(path=tempdir)
self.assertIsNone(self.vocab.compiled_revision)
self.assertFalse(self.vocab.is_compiled)
self.assertFalse(self.vocab.matches_phrases(phrases))
# We're now testing error handling. To avoid flooding the
            # output with error messages that are caught anyway,
            # we'll temporarily disable logging. Otherwise, error log
            # messages and tracebacks would be printed so that someone
# might think that tests failed even though they succeeded.
logging.disable(logging.ERROR)
with self.assertRaises(OSError):
with mock.patch('os.makedirs', side_effect=OSError('test')):
self.vocab.compile(phrases)
with self.assertRaises(OSError):
with mock.patch('%s.open' % vocabcompiler.__name__,
create=True,
side_effect=OSError('test')):
self.vocab.compile(phrases)
class StrangeCompilationError(Exception):
pass
with mock.patch.object(self.vocab, '_compile_vocabulary',
side_effect=StrangeCompilationError('test')
):
with self.assertRaises(StrangeCompilationError):
self.vocab.compile(phrases)
with self.assertRaises(StrangeCompilationError):
with mock.patch('os.remove',
side_effect=OSError('test')):
self.vocab.compile(phrases)
# Re-enable logging again
logging.disable(logging.NOTSET)
self.vocab.compile(phrases)
self.assertIsInstance(self.vocab.compiled_revision, str)
self.assertTrue(self.vocab.is_compiled)
self.assertTrue(self.vocab.matches_phrases(phrases))
self.vocab.compile(phrases)
self.vocab.compile(phrases, force=True)
class TestPocketsphinxVocabulary(TestVocabulary):
VOCABULARY = vocabcompiler.PocketsphinxVocabulary
@unittest.skipUnless(hasattr(vocabcompiler, 'cmuclmtk'),
"CMUCLMTK not present")
def testVocabulary(self):
super(TestPocketsphinxVocabulary, self).testVocabulary()
self.assertIsInstance(self.vocab.decoder_kwargs, dict)
self.assertIn('lm', self.vocab.decoder_kwargs)
self.assertIn('dict', self.vocab.decoder_kwargs)
def testPatchedVocabulary(self):
def write_test_vocab(text, output_file):
with open(output_file, "w") as f:
for word in text.split(' '):
f.write("%s\n" % word)
def write_test_lm(text, output_file, **kwargs):
with open(output_file, "w") as f:
f.write("TEST")
class DummyG2P(object):
def __init__(self, *args, **kwargs):
pass
@classmethod
def get_config(self, *args, **kwargs):
return {}
def translate(self, *args, **kwargs):
return {'GOOD': ['G UH D',
'G UW D'],
'BAD': ['B AE D'],
'UGLY': ['AH G L IY']}
with mock.patch('client.vocabcompiler.cmuclmtk',
create=True) as mocked_cmuclmtk:
mocked_cmuclmtk.text2vocab = write_test_vocab
mocked_cmuclmtk.text2lm = write_test_lm
with mock.patch('client.vocabcompiler.PhonetisaurusG2P', DummyG2P):
self.testVocabulary()
| mit |
noisemaster/AdamTestBot | unidecode/x01e.py | 246 | 3853 | data = (
'A', # 0x00
'a', # 0x01
'B', # 0x02
'b', # 0x03
'B', # 0x04
'b', # 0x05
'B', # 0x06
'b', # 0x07
'C', # 0x08
'c', # 0x09
'D', # 0x0a
'd', # 0x0b
'D', # 0x0c
'd', # 0x0d
'D', # 0x0e
'd', # 0x0f
'D', # 0x10
'd', # 0x11
'D', # 0x12
'd', # 0x13
'E', # 0x14
'e', # 0x15
'E', # 0x16
'e', # 0x17
'E', # 0x18
'e', # 0x19
'E', # 0x1a
'e', # 0x1b
'E', # 0x1c
'e', # 0x1d
'F', # 0x1e
'f', # 0x1f
'G', # 0x20
'g', # 0x21
'H', # 0x22
'h', # 0x23
'H', # 0x24
'h', # 0x25
'H', # 0x26
'h', # 0x27
'H', # 0x28
'h', # 0x29
'H', # 0x2a
'h', # 0x2b
'I', # 0x2c
'i', # 0x2d
'I', # 0x2e
'i', # 0x2f
'K', # 0x30
'k', # 0x31
'K', # 0x32
'k', # 0x33
'K', # 0x34
'k', # 0x35
'L', # 0x36
'l', # 0x37
'L', # 0x38
'l', # 0x39
'L', # 0x3a
'l', # 0x3b
'L', # 0x3c
'l', # 0x3d
'M', # 0x3e
'm', # 0x3f
'M', # 0x40
'm', # 0x41
'M', # 0x42
'm', # 0x43
'N', # 0x44
'n', # 0x45
'N', # 0x46
'n', # 0x47
'N', # 0x48
'n', # 0x49
'N', # 0x4a
'n', # 0x4b
'O', # 0x4c
'o', # 0x4d
'O', # 0x4e
'o', # 0x4f
'O', # 0x50
'o', # 0x51
'O', # 0x52
'o', # 0x53
'P', # 0x54
'p', # 0x55
'P', # 0x56
'p', # 0x57
'R', # 0x58
'r', # 0x59
'R', # 0x5a
'r', # 0x5b
'R', # 0x5c
'r', # 0x5d
'R', # 0x5e
'r', # 0x5f
'S', # 0x60
's', # 0x61
'S', # 0x62
's', # 0x63
'S', # 0x64
's', # 0x65
'S', # 0x66
's', # 0x67
'S', # 0x68
's', # 0x69
'T', # 0x6a
't', # 0x6b
'T', # 0x6c
't', # 0x6d
'T', # 0x6e
't', # 0x6f
'T', # 0x70
't', # 0x71
'U', # 0x72
'u', # 0x73
'U', # 0x74
'u', # 0x75
'U', # 0x76
'u', # 0x77
'U', # 0x78
'u', # 0x79
'U', # 0x7a
'u', # 0x7b
'V', # 0x7c
'v', # 0x7d
'V', # 0x7e
'v', # 0x7f
'W', # 0x80
'w', # 0x81
'W', # 0x82
'w', # 0x83
'W', # 0x84
'w', # 0x85
'W', # 0x86
'w', # 0x87
'W', # 0x88
'w', # 0x89
'X', # 0x8a
'x', # 0x8b
'X', # 0x8c
'x', # 0x8d
'Y', # 0x8e
'y', # 0x8f
'Z', # 0x90
'z', # 0x91
'Z', # 0x92
'z', # 0x93
'Z', # 0x94
'z', # 0x95
'h', # 0x96
't', # 0x97
'w', # 0x98
'y', # 0x99
'a', # 0x9a
'S', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'Ss', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'a', # 0xa1
'A', # 0xa2
'a', # 0xa3
'A', # 0xa4
'a', # 0xa5
'A', # 0xa6
'a', # 0xa7
'A', # 0xa8
'a', # 0xa9
'A', # 0xaa
'a', # 0xab
'A', # 0xac
'a', # 0xad
'A', # 0xae
'a', # 0xaf
'A', # 0xb0
'a', # 0xb1
'A', # 0xb2
'a', # 0xb3
'A', # 0xb4
'a', # 0xb5
'A', # 0xb6
'a', # 0xb7
'E', # 0xb8
'e', # 0xb9
'E', # 0xba
'e', # 0xbb
'E', # 0xbc
'e', # 0xbd
'E', # 0xbe
'e', # 0xbf
'E', # 0xc0
'e', # 0xc1
'E', # 0xc2
'e', # 0xc3
'E', # 0xc4
'e', # 0xc5
'E', # 0xc6
'e', # 0xc7
'I', # 0xc8
'i', # 0xc9
'I', # 0xca
'i', # 0xcb
'O', # 0xcc
'o', # 0xcd
'O', # 0xce
'o', # 0xcf
'O', # 0xd0
'o', # 0xd1
'O', # 0xd2
'o', # 0xd3
'O', # 0xd4
'o', # 0xd5
'O', # 0xd6
'o', # 0xd7
'O', # 0xd8
'o', # 0xd9
'O', # 0xda
'o', # 0xdb
'O', # 0xdc
'o', # 0xdd
'O', # 0xde
'o', # 0xdf
'O', # 0xe0
'o', # 0xe1
'O', # 0xe2
'o', # 0xe3
'U', # 0xe4
'u', # 0xe5
'U', # 0xe6
'u', # 0xe7
'U', # 0xe8
'u', # 0xe9
'U', # 0xea
'u', # 0xeb
'U', # 0xec
'u', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'Y', # 0xf2
'y', # 0xf3
'Y', # 0xf4
'y', # 0xf5
'Y', # 0xf6
'y', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit |
alexjc/pylearn2 | pylearn2/models/mlp.py | 5 | 166132 | """
Multilayer Perceptron
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import logging
import math
import operator
import sys
import warnings
import numpy as np
from theano.compat import six
from theano.compat.six.moves import reduce, xrange
from theano import config
from theano.gof.op import get_debug_values
from theano.sandbox.cuda import cuda_enabled
from theano.sandbox.cuda.dnn import dnn_available, dnn_pool
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.signal.downsample import max_pool_2d
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.costs.mlp import Default
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
# Try to import the fast cudnn library, else fallback to conv2d
if cuda_enabled and dnn_available():
try:
from pylearn2.linear import cudnn2d as conv2d
except ImportError:
from pylearn2.linear import conv2d
else:
from pylearn2.linear import conv2d
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.model import Model
from pylearn2.monitor import get_monitor_doc
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.space import CompositeSpace
from pylearn2.space import Conv2DSpace
from pylearn2.space import Space
from pylearn2.space import VectorSpace, IndexSpace
from pylearn2.utils import function
from pylearn2.utils import is_iterable
from pylearn2.utils import py_float_types
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision,
compute_recall, compute_f1)
# Only to be used by the deprecation warning wrapper functions
from pylearn2.costs.mlp import L1WeightDecay as _L1WD
from pylearn2.costs.mlp import WeightDecay as _WD
logger = logging.getLogger(__name__)
logger.debug("MLP changing the recursion limit.")
# We need this to be high enough that the big theano graphs we make
# when doing max pooling via subtensors don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
sys.setrecursionlimit(40000)
class Layer(Model):
"""
Abstract class. A Layer of an MLP.
May only belong to one MLP.
Parameters
----------
kwargs : dict
Passed on to the superclass.
Notes
-----
This is not currently a Block because as far as I know the Block interface
assumes every input is a single matrix. It doesn't support using Spaces to
work with composite inputs, stacked multichannel image inputs, etc. If the
Block interface were upgraded to be that flexible, then we could make this
a block.
"""
# When applying dropout to a layer's input, use this for masked values.
# Usually this will be 0, but certain kinds of layers may want to override
# this behaviour.
dropout_input_mask_value = 0.
def get_mlp(self):
"""
Returns the MLP that this layer belongs to.
Returns
-------
mlp : MLP
The MLP that this layer belongs to, or None if it has not been
assigned to an MLP yet.
"""
if hasattr(self, 'mlp'):
return self.mlp
return None
def set_mlp(self, mlp):
"""
Assigns this layer to an MLP. This layer will then use the MLP's
random number generator, batch size, etc. This layer's name must
be unique within the MLP.
Parameters
----------
mlp : MLP
"""
assert self.get_mlp() is None
self.mlp = mlp
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
"""
Returns monitoring channels.
Parameters
----------
state_below : member of self.input_space
A minibatch of states that this Layer took as input.
            Most of the time providing state_below is unnecessary when
state is given.
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
targets : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
Returns
-------
channels : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
return OrderedDict()
def fprop(self, state_below):
"""
Does the forward prop transformation for this layer.
Parameters
----------
state_below : member of self.input_space
A minibatch of states of the layer below.
Returns
-------
state : member of self.output_space
A minibatch of states of this layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement fprop.")
def cost(self, Y, Y_hat):
"""
The cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : theano.gof.Variable
The targets
Y_hat : theano.gof.Variable
The predictions.
Assumed to be the output of the layer's `fprop` method.
            The implementation is permitted to do things like look at the
ancestors of `Y_hat` in the theano graph. This is useful for
e.g. computing numerically stable *log* probabilities when
`Y_hat` is the *probability*.
Returns
-------
cost : theano.gof.Variable
A Theano scalar describing the cost.
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost.")
def cost_from_cost_matrix(self, cost_matrix):
"""
        The final scalar cost computed from the cost matrix.
Parameters
----------
cost_matrix : WRITEME
Examples
--------
>>> # C = model.cost_matrix(Y, Y_hat)
>>> # Do something with C like setting some values to 0
>>> # cost = model.cost_from_cost_matrix(C)
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"mlp.Layer.cost_from_cost_matrix.")
def cost_matrix(self, Y, Y_hat):
"""
        The element-wise cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : WRITEME
Y_hat : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost_matrix")
def set_weights(self, weights):
"""
Sets the weights of the layer.
Parameters
----------
weights : ndarray
A numpy ndarray containing the desired weights of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_weights.")
def get_biases(self):
"""
Returns the value of the biases of the layer.
Returns
-------
biases : ndarray
A numpy ndarray containing the biases of the layer. This docstring
is provided by the Layer base class. Layer subclasses should add
their own docstring explaining the subclass-specific format of the
ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"get_biases (perhaps because the class has no biases).")
def set_biases(self, biases):
"""
Sets the biases of the layer.
Parameters
----------
biases : ndarray
A numpy ndarray containing the desired biases of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"set_biases (perhaps because the class has no biases).")
def get_weights_format(self):
"""
Returns a description of how to interpret the weights of the layer.
Returns
-------
format: tuple
Either ('v', 'h') or ('h', 'v').
('v', 'h') means a weight matrix of shape
(num visible units, num hidden units),
while ('h', 'v') means the transpose of it.
"""
raise NotImplementedError
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_weight_decay.")
def get_l1_weight_decay(self, coeff):
"""
Provides an expression for an L1 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the L1 weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_l1_weight_decay.")
def set_input_space(self, space):
"""
Tells the layer to prepare for input formatted according to the
given space.
Parameters
----------
space : Space
The Space the input to this layer will lie in.
Notes
-----
This usually resets parameters.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_input_space.")
class MLP(Layer):
"""
A multilayer perceptron.
Note that it's possible for an entire MLP to be a single layer of a larger
MLP.
Parameters
----------
layers : list
A list of Layer objects. The final layer specifies the output space
of this MLP.
batch_size : int, optional
If not specified then must be a positive integer. Mostly useful if
one of your layers involves a Theano op like convolution that
requires a hard-coded batch size.
nvis : int, optional
Number of "visible units" (input units). Equivalent to specifying
`input_space=VectorSpace(dim=nvis)`. Note that certain methods require
a different type of input space (e.g. a Conv2Dspace in the case of
convnets). Use the input_space parameter in such cases. Should be
None if the MLP is part of another MLP.
input_space : Space object, optional
A Space specifying the kind of input the MLP accepts. If None,
input space is specified by nvis. Should be None if the MLP is
part of another MLP.
input_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the input sources this
MLP accepts. The structure should match that of input_space. The
default is 'features'. Note that this argument is ignored when
the MLP is nested.
target_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the target sources this
MLP accepts. The structure should match that of target_space. The
default is 'targets'. Note that this argument is ignored when
the MLP is nested.
layer_name : name of the MLP layer. Should be None if the MLP is
not part of another MLP.
seed : WRITEME
monitor_targets : bool, optional
Default: True
If true, includes monitoring channels that are functions of the
targets. This can be disabled to allow monitoring on monitoring
datasets that do not include targets.
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, layers, batch_size=None, input_space=None,
input_source='features', target_source='targets',
nvis=None, seed=None, layer_name=None, monitor_targets=True,
**kwargs):
super(MLP, self).__init__(**kwargs)
self.seed = seed
assert isinstance(layers, list)
assert all(isinstance(layer, Layer) for layer in layers)
assert len(layers) >= 1
self.layer_name = layer_name
self.layer_names = set()
for layer in layers:
assert layer.get_mlp() is None
if layer.layer_name in self.layer_names:
raise ValueError("MLP.__init__ given two or more layers "
"with same name: " + layer.layer_name)
layer.set_mlp(self)
self.layer_names.add(layer.layer_name)
self.layers = layers
self.batch_size = batch_size
self.force_batch_size = batch_size
self._input_source = input_source
self._target_source = target_source
self.monitor_targets = monitor_targets
if input_space is not None or nvis is not None:
self._nested = False
self.setup_rng()
# check if the layer_name is None (the MLP is the outer MLP)
assert layer_name is None
if nvis is not None:
input_space = VectorSpace(nvis)
# Check whether the input_space and input_source structures match
try:
DataSpecsMapping((input_space, input_source))
except ValueError:
raise ValueError("The structures of `input_space`, %s, and "
"`input_source`, %s do not match. If you "
"specified a CompositeSpace as an input, "
"be sure to specify the data sources as well."
% (input_space, input_source))
self.input_space = input_space
self._update_layer_input_spaces()
else:
self._nested = True
self.freeze_set = set([])
@property
def input_source(self):
assert not self._nested, "A nested MLP does not have an input source"
return self._input_source
@property
def target_source(self):
assert not self._nested, "A nested MLP does not have a target source"
return self._target_source
def setup_rng(self):
"""
.. todo::
WRITEME
"""
assert not self._nested, "Nested MLPs should use their parent's RNG"
if self.seed is None:
self.seed = [2013, 1, 4]
self.rng = np.random.RandomState(self.seed)
@wraps(Layer.get_default_cost)
def get_default_cost(self):
return Default()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layers[-1].get_output_space()
@wraps(Layer.get_target_space)
def get_target_space(self):
return self.layers[-1].get_target_space()
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if hasattr(self, "mlp"):
assert self._nested
self.rng = self.mlp.rng
self.batch_size = self.mlp.batch_size
self.input_space = space
self._update_layer_input_spaces()
def _update_layer_input_spaces(self):
"""
Tells each layer what its input space should be.
Notes
-----
This usually resets the layer's parameters!
"""
layers = self.layers
try:
layers[0].set_input_space(self.get_input_space())
except BadInputSpaceError as e:
raise TypeError("Layer 0 (" + str(layers[0]) + " of type " +
str(type(layers[0])) +
") does not support the MLP's "
+ "specified input space (" +
str(self.get_input_space()) +
" of type " + str(type(self.get_input_space())) +
"). Original exception: " + str(e))
for i in xrange(1, len(layers)):
layers[i].set_input_space(layers[i - 1].get_output_space())
def add_layers(self, layers):
"""
Add new layers on top of the existing hidden layers
Parameters
----------
layers : WRITEME
"""
existing_layers = self.layers
assert len(existing_layers) > 0
for layer in layers:
assert layer.get_mlp() is None
layer.set_mlp(self)
# In the case of nested MLPs, input/output spaces may have not yet
# been initialized
if not self._nested or hasattr(self, 'input_space'):
layer.set_input_space(existing_layers[-1].get_output_space())
existing_layers.append(layer)
assert layer.layer_name not in self.layer_names
self.layer_names.add(layer.layer_name)
def freeze(self, parameter_set):
"""
Freezes some of the parameters (new theano functions that implement
learning will not use them; existing theano functions will continue
to modify them).
Parameters
----------
parameter_set : set
Set of parameters to freeze.
"""
self.freeze_set = self.freeze_set.union(parameter_set)
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
        # if the MLP is the outer MLP
        # (i.e. the MLP is not contained in another structure)
if self.monitor_targets:
X, Y = data
else:
X = data
Y = None
rval = self.get_layer_monitoring_channels(state_below=X,
targets=Y)
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
state = state_below
for layer in self.layers:
# We don't go through all the inner layers recursively
state_below = state
state = layer.fprop(state)
args = [state_below, state]
if layer is self.layers[-1] and targets is not None:
args.append(targets)
ch = layer.get_layer_monitoring_channels(*args)
if not isinstance(ch, OrderedDict):
raise TypeError(str((type(ch), layer.layer_name)))
for key in ch:
value = ch[key]
doc = get_monitor_doc(value)
if doc is None:
doc = str(type(layer)) + \
".get_monitoring_channels_from_state did" + \
" not provide any further documentation for" + \
" this channel."
doc = 'This channel came from a layer called "' + \
layer.layer_name + '" of an MLP.\n' + doc
value.__doc__ = doc
rval[layer.layer_name + '_' + key] = value
return rval
def get_monitoring_data_specs(self):
"""
Returns data specs requiring both inputs and targets.
Returns
-------
data_specs: TODO
The data specifications for both inputs and targets.
"""
if not self.monitor_targets:
return (self.get_input_space(), self.get_input_source())
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
@wraps(Layer.get_params)
def get_params(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = []
for layer in self.layers:
for param in layer.get_params():
if param.name is None:
logger.info(type(layer))
layer_params = layer.get_params()
assert not isinstance(layer_params, set)
for param in layer_params:
if param not in rval:
rval.append(param)
rval = [elem for elem in rval if elem not in self.freeze_set]
assert all([elem.name is not None for elem in rval])
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_l1_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Model.set_batch_size)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
self.force_batch_size = batch_size
for layer in self.layers:
layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
@wraps(Layer.get_weights)
def get_weights(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights()
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_view_shape()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_format()
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_topo()
def dropout_fprop(self, state_below, default_input_include_prob=0.5,
input_include_probs=None, default_input_scale=2.,
input_scales=None, per_example=True):
"""
Returns the output of the MLP, when applying dropout to the input and
intermediate layers.
Parameters
----------
state_below : WRITEME
The input to the MLP
default_input_include_prob : WRITEME
input_include_probs : WRITEME
default_input_scale : WRITEME
input_scales : WRITEME
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
Notes
-----
Each input to each layer is randomly included or
excluded for each example. The probability of inclusion is independent
for each input and each example. Each layer uses
`default_input_include_prob` unless that layer's name appears as a key
in input_include_probs, in which case the input inclusion probability
is given by the corresponding value.
        Each feature is also multiplied by a scale factor. The scale factor
        for each layer's input is determined by the same scheme as the input
        inclusion probabilities.
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
self._validate_layer_names(list(input_include_probs.keys()))
self._validate_layer_names(list(input_scales.keys()))
theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))
for layer in self.layers:
layer_name = layer.layer_name
if layer_name in input_include_probs:
include_prob = input_include_probs[layer_name]
else:
include_prob = default_input_include_prob
if layer_name in input_scales:
scale = input_scales[layer_name]
else:
scale = default_input_scale
state_below = self.apply_dropout(
state=state_below,
include_prob=include_prob,
theano_rng=theano_rng,
scale=scale,
mask_value=layer.dropout_input_mask_value,
input_space=layer.get_input_space(),
per_example=per_example
)
state_below = layer.fprop(state_below)
return state_below
def masked_fprop(self, state_below, mask, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Forward propagate through the network with a dropout mask
determined by an integer (the binary representation of
which is used to generate the mask).
Parameters
----------
state_below : tensor_like
The (symbolic) output state of the layer below.
mask : int
An integer indexing possible binary masks. It should be
< 2 ** get_total_input_dimension(masked_input_layers)
and greater than or equal to 0.
masked_input_layers : list, optional
A list of layer names to mask. If `None`, the input to all layers
(including the first hidden layer) is masked.
default_input_scale : float, optional
The amount to scale inputs in masked layers that do not appear in
`input_scales`. Defaults to 2.
input_scales : dict, optional
A dictionary mapping layer names to floating point numbers
indicating how much to scale input to a given layer.
Returns
-------
masked_output : tensor_like
The output of the forward propagation of the masked network.
"""
if input_scales is not None:
self._validate_layer_names(input_scales)
else:
input_scales = {}
if any(n not in masked_input_layers for n in input_scales):
layers = [n for n in input_scales if n not in masked_input_layers]
raise ValueError("input scales provided for layer not masked: " %
", ".join(layers))
if masked_input_layers is not None:
self._validate_layer_names(masked_input_layers)
else:
masked_input_layers = self.layer_names
num_inputs = self.get_total_input_dimension(masked_input_layers)
assert mask >= 0, "Mask must be a non-negative integer."
if mask > 0 and math.log(mask, 2) > num_inputs:
raise ValueError("mask value of %d too large; only %d "
"inputs to layers (%s)" %
(mask, num_inputs,
", ".join(masked_input_layers)))
def binary_string(x, length, dtype):
"""
Create the binary representation of an integer `x`, padded to
`length`, with dtype `dtype`.
Parameters
----------
length : WRITEME
dtype : WRITEME
Returns
-------
WRITEME
"""
s = np.empty(length, dtype=dtype)
for i in range(length - 1, -1, -1):
if x // (2 ** i) == 1:
s[i] = 1
else:
s[i] = 0
x = x % (2 ** i)
return s
remaining_mask = mask
for layer in self.layers:
if layer.layer_name in masked_input_layers:
scale = input_scales.get(layer.layer_name,
default_input_scale)
n_inputs = layer.get_input_space().get_total_dimension()
layer_dropout_mask = remaining_mask & (2 ** n_inputs - 1)
remaining_mask >>= n_inputs
mask = binary_string(layer_dropout_mask, n_inputs,
'uint8')
shape = layer.get_input_space().get_origin_batch(1).shape
s_mask = T.as_tensor_variable(mask).reshape(shape)
if layer.dropout_input_mask_value == 0:
state_below = state_below * s_mask * scale
else:
state_below = T.switch(s_mask, state_below * scale,
layer.dropout_input_mask_value)
state_below = layer.fprop(state_below)
return state_below
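    # Worked example (editorial, not part of the original source): suppose two
    # masked layers take 3 and 2 inputs respectively, and mask == 22 (0b10110).
    # The first layer consumes the low 3 bits: 22 & (2 ** 3 - 1) == 6 (0b110),
    # so its first input is dropped and the other two are kept; the remaining
    # mask is 22 >> 3 == 2 (0b10), which becomes the 2-bit mask of the second
    # layer. binary_string(6, 3, 'uint8') accordingly returns [0, 1, 1].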
def _validate_layer_names(self, layers):
"""
.. todo::
WRITEME
"""
if any(layer not in self.layer_names for layer in layers):
unknown_names = [layer for layer in layers
if layer not in self.layer_names]
raise ValueError("MLP has no layer(s) named %s" %
", ".join(unknown_names))
def get_total_input_dimension(self, layers):
"""
Get the total number of inputs to the layers whose
names are listed in `layers`. Used for computing the
total number of dropout masks.
Parameters
----------
layers : WRITEME
Returns
-------
WRITEME
"""
self._validate_layer_names(layers)
total = 0
for layer in self.layers:
if layer.layer_name in layers:
total += layer.get_input_space().get_total_dimension()
return total
@wraps(Layer.fprop)
def fprop(self, state_below, return_all=False):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = self.layers[0].fprop(state_below)
rlist = [rval]
for layer in self.layers[1:]:
rval = layer.fprop(rval)
rlist.append(rval)
if return_all:
return rlist
return rval
def apply_dropout(self, state, include_prob, scale, theano_rng,
input_space, mask_value=0, per_example=True):
"""
.. todo::
WRITEME
Parameters
----------
state: WRITEME
include_prob : WRITEME
scale : WRITEME
theano_rng : WRITEME
input_space : WRITEME
mask_value : WRITEME
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
"""
if include_prob in [None, 1.0, 1]:
return state
assert scale is not None
if isinstance(state, tuple):
            return tuple(self.apply_dropout(substate, include_prob, scale,
                                            theano_rng, component, mask_value,
                                            per_example)
                         for substate, component
                         in safe_zip(state, input_space.components))
# TODO: all of this assumes that if it's not a tuple, it's
# a dense tensor. It hasn't been tested with sparse types.
# A method to format the mask (or any other values) as
# the given symbolic type should be added to the Spaces
# interface.
if per_example:
mask = theano_rng.binomial(p=include_prob, size=state.shape,
dtype=state.dtype)
else:
batch = input_space.get_origin_batch(1)
mask = theano_rng.binomial(p=include_prob, size=batch.shape,
dtype=state.dtype)
rebroadcast = T.Rebroadcast(*zip(xrange(batch.ndim),
[s == 1 for s in batch.shape]))
mask = rebroadcast(mask)
if mask_value == 0:
rval = state * mask * scale
else:
rval = T.switch(mask, state * scale, mask_value)
return T.cast(rval, state.dtype)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.layers[-1].cost(Y, Y_hat)
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
return self.layers[-1].cost_matrix(Y, Y_hat)
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return self.layers[-1].cost_from_cost_matrix(cost_matrix)
def cost_from_X(self, data):
"""
Computes self.cost, but takes data=(X, Y) rather than Y_hat as an
argument.
This is just a wrapper around self.cost that computes Y_hat by
calling Y_hat = self.fprop(X)
Parameters
----------
data : WRITEME
"""
self.cost_from_X_data_specs()[0].validate(data)
X, Y = data
Y_hat = self.fprop(X)
return self.cost(Y, Y_hat)
def cost_from_X_data_specs(self):
"""
Returns the data specs needed by cost_from_X.
This is useful if cost_from_X is used in a MethodCost.
"""
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
def __str__(self):
"""
Summarizes the MLP by printing the size and format of the input to all
layers. Feel free to add reasonably concise info as needed.
"""
rval = []
for layer in self.layers:
rval.append(layer.layer_name)
input_space = layer.get_input_space()
rval.append('\tInput space: ' + str(input_space))
rval.append('\tTotal input dimension: ' +
str(input_space.get_total_dimension()))
rval = '\n'.join(rval)
return rval
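# Editorial sketch, not part of pylearn2: the inverted-dropout arithmetic used
# by dropout_fprop/apply_dropout above, shown with plain numpy on a made-up
# 2x4 minibatch. With include_prob=0.5 and scale=2., kept inputs are doubled
# so each unit's expected input matches the no-dropout case. The helper name
# is hypothetical and the function is never called on import.
def _sketch_inverted_dropout(rng=None):
    import numpy as np
    rng = rng or np.random.RandomState(0)
    state = np.ones((2, 4))
    include_prob, scale = 0.5, 2.
    # sample a binary keep/drop mask, one entry per input and per example
    mask = rng.binomial(n=1, p=include_prob, size=state.shape)
    # mask_value == 0 branch of apply_dropout: drop some inputs, rescale the rest
    return state * mask * scale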
class Softmax(Layer):
"""
A layer that can apply an optional affine transformation
to vectorial inputs followed by a softmax nonlinearity.
Parameters
----------
n_classes : int
Number of classes for softmax targets.
layer_name : string
Name of Softmax layers.
irange : float
        If specified, initialize each weight randomly in
U(-irange, irange).
istdev : float
If specified, initialize each weight randomly from
N(0,istdev).
sparse_init : int
        If specified, initialize only sparse_init weights per unit,
        drawn from N(0, 1).
W_lr_scale : float
Scale for weight learning rate.
b_lr_scale : float
Scale for bias learning rate.
max_row_norm : float
Maximum norm for a row of the weight matrix.
no_affine : boolean
If True, softmax nonlinearity is applied directly to
inputs.
max_col_norm : float
Maximum norm for a column of the weight matrix.
init_bias_target_marginals : dataset
Take the probability distribution of the targets into account to
intelligently initialize biases.
binary_target_dim : int, optional
If your targets are class labels (i.e. a binary vector) then set the
number of targets here so that an IndexSpace of the proper dimension
can be used as the target space. This allows the softmax to compute
the cost much more quickly than if it needs to convert the targets
into a VectorSpace. With binary_target_dim>1, you can use one layer
to simultaneously predict a bag of words (i.e. order is not important,
the same element can be included more than once).
non_redundant : bool
If True, learns only n_classes - 1 biases and weight vectors
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, n_classes, layer_name, irange=None,
istdev=None,
sparse_init=None, W_lr_scale=None,
b_lr_scale=None, max_row_norm=None,
no_affine=False,
max_col_norm=None, init_bias_target_marginals=None,
binary_target_dim=None, non_redundant=False,
**kwargs):
super(Softmax, self).__init__(**kwargs)
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))
if max_row_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_row_norm, axis=1))
if non_redundant:
if init_bias_target_marginals:
msg = ("init_bias_target_marginals currently only works "
"with the overcomplete parameterization.")
raise NotImplementedError(msg)
if isinstance(W_lr_scale, str):
W_lr_scale = float(W_lr_scale)
self.__dict__.update(locals())
del self.self
del self.init_bias_target_marginals
if not isinstance(n_classes, py_integer_types):
raise TypeError("n_classes is of type %s, but must be integer" %
type(n_classes))
if binary_target_dim is not None:
assert isinstance(binary_target_dim, py_integer_types)
self._has_binary_target = True
self._target_space = IndexSpace(dim=binary_target_dim,
max_labels=n_classes)
else:
self._has_binary_target = False
self.output_space = VectorSpace(n_classes)
if not no_affine:
self.b = sharedX(np.zeros((n_classes - self.non_redundant,)),
name='softmax_b')
if init_bias_target_marginals:
y = init_bias_target_marginals.y
if init_bias_target_marginals.y_labels is None:
marginals = y.mean(axis=0)
else:
# compute class frequencies
if np.max(y.shape) != np.prod(y.shape):
raise AssertionError("Use of "
"`init_bias_target_marginals` "
"requires that each example has "
"a single label.")
marginals = np.bincount(y.flat) / float(y.shape[0])
assert marginals.ndim == 1
b = pseudoinverse_softmax_numpy(marginals).astype(self.b.dtype)
assert b.ndim == 1
assert b.dtype == self.b.dtype
self.b.set_value(b)
else:
assert init_bias_target_marginals is None
def __setstate__(self, state):
super(Softmax, self).__setstate__(state)
# Patch old pickle files
if not hasattr(self, 'non_redundant'):
self.non_redundant = False
if not hasattr(self, 'mask_weights'):
self.mask_weights = None
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = OrderedDict()
if self.W_lr_scale is not None:
assert isinstance(self.W_lr_scale, float)
rval[self.W] = self.W_lr_scale
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
if self.b_lr_scale is not None:
assert isinstance(self.b_lr_scale, float)
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
if not self.no_affine:
W = self.W
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval.update(OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ]))
if (state_below is not None) or (state is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=1)
rval.update(OrderedDict([('mean_max_class', mx.mean()),
('max_max_class', mx.max()),
('min_max_class', mx.min())]))
if (targets is not None):
if ((not self._has_binary_target) or
self.binary_target_dim == 1):
# if binary_target_dim>1, the misclass rate is ill-defined
y_hat = T.argmax(state, axis=1)
y = (targets.reshape(y_hat.shape)
if self._has_binary_target
else T.argmax(targets, axis=1))
misclass = T.neq(y, y_hat).mean()
misclass = T.cast(misclass, config.floatX)
rval['misclass'] = misclass
rval['nll'] = self.cost(Y_hat=state, Y=targets)
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Space):
raise TypeError("Expected Space, got " +
str(space) + " of type " + str(type(space)))
self.input_dim = space.get_total_dimension()
self.needs_reformat = not isinstance(space, VectorSpace)
if self.no_affine:
desired_dim = self.n_classes - self.non_redundant
assert self.input_dim == desired_dim
else:
desired_dim = self.input_dim
self.desired_space = VectorSpace(desired_dim)
if not self.needs_reformat:
assert self.desired_space == self.input_space
rng = self.mlp.rng
if self.no_affine:
self._params = []
else:
num_cols = self.n_classes - self.non_redundant
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, num_cols))
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, num_cols) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, num_cols))
for i in xrange(num_cols):
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0.:
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
self.W = sharedX(W, 'softmax_W')
self._params = [self.b, self.W]
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
desired = self.W.get_value().T
ipt = self.desired_space.np_format_as(desired, self.input_space)
rval = Conv2DSpace.convert_numpy(ipt,
self.input_space.axes,
('b', 0, 1, 'c'))
return rval
@wraps(Layer.get_weights)
def get_weights(self):
if not isinstance(self.input_space, VectorSpace):
raise NotImplementedError()
return self.W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
self.W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.needs_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
self.desired_space.validate(state_below)
assert state_below.ndim == 2
if not hasattr(self, 'no_affine'):
self.no_affine = False
if self.no_affine:
Z = state_below
else:
assert self.W.ndim == 2
b = self.b
Z = T.dot(state_below, self.W) + b
if self.non_redundant:
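# Pin the first class's pre-softmax activation to zero by prepending
# a column of zeros, so the remaining n_classes - 1 columns of W
# parameterize the softmax without its usual redundant degree of freedom.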
zeros = T.alloc(0., Z.shape[0], 1)
Z = T.concatenate((zeros, Z), axis=1)
rval = T.nnet.softmax(Z)
for value in get_debug_values(rval):
if self.mlp.batch_size is not None:
assert value.shape[0] == self.mlp.batch_size
return rval
def _cost(self, Y, Y_hat):
z = arg_of_softmax(Y_hat)
assert z.ndim == 2
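# Numerically stable log-softmax: shift each row by its max before
# exponentiating, then subtract the log of the partition function.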
z = z - z.max(axis=1).dimshuffle(0, 'x')
log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
# we use sum and not mean because this is really one variable per row
if self._has_binary_target:
# The following code is the equivalent of accessing log_prob by the
# indices in Y, but it is written such that the computation can
# happen on the GPU rather than CPU.
flat_Y = Y.flatten()
flat_Y.name = 'flat_Y'
flat_log_prob = log_prob.flatten()
flat_log_prob.name = 'flat_log_prob'
range_ = T.arange(Y.shape[0])
if self.binary_target_dim > 1:
# because of an error in optimization (local_useless_tile)
# when tiling with (1, 1)
range_ = T.tile(range_.dimshuffle(0, 'x'),
(1, self.binary_target_dim)).flatten()
flat_indices = flat_Y + range_ * self.n_classes
flat_indices.name = 'flat_indices'
log_prob_of = flat_log_prob[flat_indices].reshape(Y.shape, ndim=2)
log_prob_of.name = 'log_prob_of'
else:
log_prob_of = (Y * log_prob)
return log_prob_of
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat).sum(axis=1)
assert log_prob_of.ndim == 1
rval = log_prob_of.mean()
return - rval
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat)
if self._has_binary_target:
flat_Y = Y.flatten()
flat_matrix = T.alloc(0., (Y.shape[0] * self.n_classes))
flat_indices = flat_Y + T.extra_ops.repeat(
T.arange(Y.shape[0]) * self.n_classes, Y.shape[1]
)
# Scatter the per-target log probabilities into a (batch, n_classes)
# matrix; entries for non-target classes stay at zero.
log_prob_of = T.set_subtensor(
flat_matrix[flat_indices], log_prob_of.flatten()
).reshape((Y.shape[0], self.n_classes))
return -log_prob_of
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
return coeff * T.sqr(self.W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W = self.W
return coeff * abs(W).sum()
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.no_affine:
return
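# Minimal NumPy sketch (illustrative only, not part of pylearn2) of the
# stable log-softmax negative log-likelihood that Softmax._cost and
# Softmax.cost build symbolically above; the function name and arguments
# are assumptions made for this example.
def _softmax_nll_sketch(z, y_onehot):
    """z: (batch, n_classes) logits; y_onehot: (batch, n_classes) targets."""
    import numpy as np
    z = z - z.max(axis=1, keepdims=True)  # stability shift, as in _cost
    log_prob = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    # one log-probability per example; the sum picks out the target class
    log_prob_of = (y_onehot * log_prob).sum(axis=1)
    return -log_prob_of.mean()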
class SoftmaxPool(Layer):
"""
A hidden layer that uses the softmax function to do max pooling over groups
of units. When the pooling size is 1, this reduces to a standard sigmoidal
MLP layer.
Parameters
----------
detector_layer_dim : WRITEME
layer_name : str
The name of the layer. All layers in an MLP must have a unique name.
pool_size : WRITEME
irange : float, optional
If specified, initialized each weight randomly in U(-irange, irange).
sparse_init : int, optional
If specified, initial sparse_init number of weights for each unit from
N(0,1).
sparse_stdev : WRITEME
include_prob : float, optional
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
init_bias : WRITEME
W_lr_scale : float, optional
Multiply the learning rate on the weights by this constant.
b_lr_scale : float, optional
Multiply the learning rate on the biases by this constant.
mask_weights : WRITEME
max_col_norm : float, optional
Maximum norm for a column of the weight matrix.
"""
def __init__(self,
detector_layer_dim,
layer_name,
pool_size=1,
irange=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_col_norm=None):
super(SoftmaxPool, self).__init__()
self.__dict__.update(locals())
del self.self
self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
name=(layer_name + '_b'))
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
if not (self.detector_layer_dim % self.pool_size == 0):
raise ValueError("detector_layer_dim = %d, pool_size = %d. "
"Should be divisible but remainder is %d" %
(self.detector_layer_dim,
self.pool_size,
self.detector_layer_dim % self.pool_size))
self.h_space = VectorSpace(self.detector_layer_dim)
self.pool_layer_dim = self.detector_layer_dim / self.pool_size
self.output_space = VectorSpace(self.pool_layer_dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.detector_layer_dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
< self.include_prob)
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.detector_layer_dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.detector_layer_dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.detector_layer_dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) +
" but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
return W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
"""
.. todo::
WRITEME
"""
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_layer_dim
cols = self.pool_size
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total / cols
return rows, cols
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.detector_layer_dim,
self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, **kwargs):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state_below is not None) or (state is not None):
if state is None:
P = self.fprop(state_below)
else:
P = state
if self.pool_size == 1:
vars_and_prefixes = [(P, '')]
else:
vars_and_prefixes = [(P, 'p_')]
for var, prefix in vars_and_prefixes:
v_max = var.max(axis=0)
v_min = var.min(axis=0)
v_mean = var.mean(axis=0)
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
# otherwise its hard to remember which axis is which when
# reading the monitor I use inner.outer rather than
# outer_of_inner or something like that because I want
# mean_x.* to appear next to each other in the alphabetical
# list, as these are commonly plotted together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below) + self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
p, h = max_pool_channels(z, self.pool_size)
p.name = self.layer_name + '_p_'
return p
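# Illustrative NumPy sketch of max pooling over groups of detector units,
# the reduction SoftmaxPool performs through `max_pool_channels` above.
# The real op also returns the per-group detector states and handles
# pool_size == 1 specially; this sketch only shows the grouping idea and
# its names are assumptions.
def _pool_groups_sketch(h, pool_size):
    """h: (batch, detector_dim), detector_dim divisible by pool_size."""
    batch, detector_dim = h.shape
    assert detector_dim % pool_size == 0
    grouped = h.reshape(batch, detector_dim // pool_size, pool_size)
    return grouped.max(axis=2)  # (batch, pool_layer_dim)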
class Linear(Layer):
"""
A "linear model" in machine learning terminology. This would be more
accurately described as an affine model because it adds an offset to
the output as well as doing a matrix multiplication. The output is:
output = T.dot(weights, input) + biases
This class may be used as the output layer of an MLP for regression.
It may also be used as a hidden layer. Most hidden layer classes are
subclasses of this class that apply a fixed nonlinearity to the
output of the affine transformation provided by this class.
One notable use of this class is to provide "bottleneck" layers.
By using a Linear layer with few hidden units followed by a nonlinear
layer such as RectifiedLinear with many hidden units, one essentially
gets a RectifiedLinear layer with a factored weight matrix, which can
reduce the number of parameters in the model (by making the effective
weight matrix low rank).
Parameters
----------
dim : int
The number of elements in the output of the layer.
layer_name : str
The name of the layer. All layers in an MLP must have a unique name.
irange : float, optional
If specified, initialized each weight randomly in U(-irange, irange).
istdev : float, optional
If specified, initialize each weight randomly from N(0,istdev).
sparse_init : int, optional
If specified, initial sparse_init number of weights for each unit from
N(0,1).
sparse_stdev : WRITEME
include_prob : float
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
init_bias : float or ndarray, optional
Anything that can be broadcasted to a numpy vector.
Provides the initial value of the biases of the model.
When using this class as an output layer (specifically the Linear
class, or subclasses that don't change the output like
LinearGaussian, but not subclasses that change the output, like
Softmax) it can be a good idea to set this to the return value of
the `mean_of_targets` function. This provides the mean value of
all the targets in the training set, so the model is initialized
to a dummy model that predicts the expected value of each output
variable.
W_lr_scale : float, optional
Multiply the learning rate on the weights by this constant.
b_lr_scale : float, optional
Multiply the learning rate on the biases by this constant.
mask_weights : ndarray, optional
If provided, the weights will be multiplied by this mask after each
learning update.
max_row_norm : float, optional
Maximum norm for a row of the weight matrix.
max_col_norm : float, optional
Maximum norm for a column of the weight matrix.
min_col_norm : WRITEME
copy_input : REMOVED
use_abs_loss : bool, optional
If True, the cost function will be mean absolute error rather
than mean squared error.
You can think of mean squared error as fitting a Gaussian
distribution with variance 1, or as learning to predict the mean
of the data.
You can think of mean absolute error as fitting a Laplace
distribution with variance 1, or as learning to predict the
median of the data.
use_bias : bool, optional
If False, does not add the bias term to the output.
kwargs : dict
Passed on to superclass constructor.
"""
def __init__(self,
dim,
layer_name,
irange=None,
istdev=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_row_norm=None,
max_col_norm=None,
min_col_norm=None,
copy_input=None,
use_abs_loss=False,
use_bias=True,
**kwargs):
if copy_input is not None:
raise AssertionError(
"The copy_input option had a bug and has "
"been removed from the library.")
super(Linear, self).__init__(**kwargs)
if use_bias and init_bias is None:
init_bias = 0.
self.__dict__.update(locals())
del self.self
if use_bias:
self.b = sharedX(np.zeros((self.dim,)) + init_bias,
name=(layer_name + '_b'))
else:
assert b_lr_scale is None
# init_bias is ignored when use_bias is False.
if (((max_col_norm is not None) or (min_col_norm is not None))
and (max_row_norm is not None)):
raise ValueError('Column and row constraint '
'at the same time is forbidden.')
if (max_col_norm is not None) or (min_col_norm is not None):
self.extensions.append(MaxL2FilterNorm(
limit=max_col_norm,
min_limit=min_col_norm,
axis=0))
if max_row_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_row_norm, axis=1))
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
self.output_space = VectorSpace(self.dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.dim))
< self.include_prob)
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, self.dim) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) + " but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
@wraps(Layer.get_params)
def get_params(self):
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
if self.use_bias:
assert self.b.name is not None
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.get_value()
return W
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
"""
.. todo::
WRITEME
"""
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.dim, self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def _linear_part(self, state_below):
"""
Parameters
----------
state_below : member of input_space
Returns
-------
output : theano matrix
Affine transformation of state_below
"""
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below)
if self.use_bias:
z += self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
return z
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.cost_from_cost_matrix(self.cost_matrix(Y, Y_hat))
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return cost_matrix.sum(axis=1).mean()
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
if(self.use_abs_loss):
return T.abs_(Y - Y_hat)
else:
return T.sqr(Y - Y_hat)
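# NumPy sketch of the "bottleneck" construction described in the Linear
# docstring: a narrow Linear layer followed by a wide RectifiedLinear
# layer behaves like one rectified layer whose effective weight matrix
# W1.dot(W2) has rank at most k. Names here are illustrative assumptions.
def _bottleneck_sketch(x, W1, b1, W2, b2):
    """x: (batch, d_in); W1: (d_in, k); W2: (k, d_out)."""
    import numpy as np
    h = x.dot(W1) + b1                       # low-dimensional bottleneck
    return np.maximum(0., h.dot(W2) + b2)    # wide rectified layer on top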
class Tanh(Linear):
"""
A layer that performs an affine transformation of its (vectorial)
input followed by a hyperbolic tangent elementwise nonlinearity.
Parameters
----------
kwargs : dict
Keyword arguments to pass through to `Linear` class constructor.
"""
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.tanh(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class Sigmoid(Linear):
"""
A layer that performs an affine transformation of its
input followed by a logistic sigmoid elementwise nonlinearity.
Parameters
----------
monitor_style : string
Values can be any of ['detection', 'one_hot_class',
'bit_vector_class']
'detection' is the default.
- 'detection' : get_monitor_from_state makes no assumptions about
target, reports info about how good model is at
detecting positive bits.
This will monitor precision, recall, and F1 score
based on a detection threshold of 0.5. Note that
these quantities are computed *per-minibatch* and
averaged together. Unless your entire monitoring
dataset fits in one minibatch, this is not the same
as the true F1 score, etc., and will usually
seriously overestimate your performance.
- 'one_hot_class' : get_monitor_from_state assumes target is
one-hot class indicator, even though you're training the
model as k independent sigmoids. Gives info on how
good the argmax over the sigmoids behaves as a classifier.
- 'bit_vector_class' : get_monitor_from_state treats each
sigmoid as predicting a 1 iff its value is > 0.5. Each
example is counted as correct iff all of the bits in its
target are predicted correctly.
This includes as a special case the situation where the
target is a single 0 or 1 label.
- 'classification' : deprecated; originally this string was
used for 'one_hot_class', then due to a miscommunication
it was changed to be used for 'bit_vector_class'.
kwargs : dict
Passed through to the Layer class constructor
"""
def __init__(self, monitor_style='detection', **kwargs):
super(Sigmoid, self).__init__(**kwargs)
if monitor_style == 'classification':
monitor_style = 'bit_vector_class'
warnings.warn("The 'classification' monitor style is deprecated."
" Switch to 'bit_vector_class' (or possibly"
" 'one_hot_class' if your code predates 8f4b62b3df)."
" 'classification' may be removed on or after "
"2015-04-21.")
assert monitor_style in ['one_hot_class', 'bit_vector_class',
'detection']
self.monitor_style = monitor_style
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.sigmoid(p)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
"""
Returns the mean, across units and across the batch, of the KL
divergence between the targets Y and the predictions Y_hat.
Parameters
----------
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
Returns
-------
ave : theano.gof.Variable
Mean across units and across the batch of the KL divergence.
Notes
-----
Uses KL(P || Q) where P is defined by Y and Q is defined by Y_hat
Currently Y must be purely binary. If it's not, you'll still
get the right gradient, but the value in the monitoring channel
will be wrong.
Y_hat must be generated by fprop, i.e., it must be a symbolic
sigmoid.
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
total = self.kl(Y=Y, Y_hat=Y_hat)
ave = total.mean()
return ave
def kl(self, Y, Y_hat):
"""
Computes the KL divergence.
Parameters
----------
Y : Variable
targets for the sigmoid outputs. Currently Y must be purely binary.
If it's not, you'll still get the right gradient, but the
value in the monitoring channel will be wrong.
Y_hat : Variable
predictions made by the sigmoid layer. Y_hat must be generated by
fprop, i.e., it must be a symbolic sigmoid.
Returns
-------
div : Variable
a batch vector giving, for each example, the mean across units of
the KL divergence between Y and Y_hat.
Notes
-----
Warning: This function expects a sigmoid nonlinearity in the
output layer and it uses kl function under pylearn2/expr/nnet/.
Returns a batch (vector) of mean across units of KL
divergence for each example,
KL(P || Q) where P is defined by Y and Q is defined by Y_hat:
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
batch_axis = self.output_space.get_batch_axis()
div = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
return div
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
rval = elemwise_kl(Y, Y_hat)
assert rval.ndim == 2
return rval
def get_detection_channels_from_state(self, state, target):
"""
Returns monitoring channels when using the layer to do detection
of binary events.
Parameters
----------
state : theano.gof.Variable
Output of `fprop`
target : theano.gof.Variable
The targets from the dataset
Returns
-------
channels : OrderedDict
Dictionary mapping channel names to Theano channel values.
"""
rval = OrderedDict()
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = self.cost(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=0)
fp = ((1 - y) * y_hat).sum(axis=0)
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(Sigmoid, self).get_layer_monitoring_channels(
state=state, targets=targets)
if (targets is not None) and \
((state_below is not None) or (state is not None)):
if state is None:
state = self.fprop(state_below)
if self.monitor_style == 'detection':
rval.update(self.get_detection_channels_from_state(state,
targets))
elif self.monitor_style == 'one_hot_class':
# For this monitor style, we know (by assumption) that
# exactly one bit is always on, so we pick
# the single most likely bit under the model, regardless
# of whether its probability exceeds 0.5
prediction = state.argmax(axis=1)
labels = targets.argmax(axis=1)
incorrect = T.neq(prediction, labels)
misclass = T.cast(incorrect, config.floatX).mean()
rval['misclass'] = misclass
else:
assert self.monitor_style == 'bit_vector_class'
# Threshold Y_hat at 0.5.
prediction = T.gt(state, 0.5)
# If even one feature is wrong for a given training example,
# it's considered incorrect, so we max over columns.
incorrect = T.neq(targets, prediction).max(axis=1)
rval['misclass'] = T.cast(incorrect, config.floatX).mean()
return rval
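# Quick NumPy check (illustrative only) of the identity quoted in the
# Sigmoid.cost/kl docstrings: for binary targets p and logits z,
#   -p*log(sigmoid(z)) - (1-p)*log(1-sigmoid(z))
#       == p*softplus(-z) + (1-p)*softplus(z).
# The function below is an assumption for this sketch, not pylearn2 API.
def _sigmoid_kl_identity_sketch(p, z):
    import numpy as np
    sig = 1. / (1. + np.exp(-z))
    softplus = lambda a: np.log1p(np.exp(a))
    lhs = -p * np.log(sig) - (1. - p) * np.log(1. - sig)
    rhs = p * softplus(-z) + (1. - p) * softplus(z)
    return np.allclose(lhs, rhs)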
class RectifiedLinear(Linear):
"""
Rectified linear MLP layer (Glorot and Bengio 2011).
Parameters
----------
left_slope : float
The slope the line should have left of 0.
kwargs : dict
Keyword arguments to pass to `Linear` class constructor.
"""
def __init__(self, left_slope=0.0, **kwargs):
super(RectifiedLinear, self).__init__(**kwargs)
self.left_slope = left_slope
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
# Original: p = p * (p > 0.) + self.left_slope * p * (p < 0.)
# T.switch is faster.
# For details, see benchmarks in
# pylearn2/scripts/benchmark/time_relu.py
p = T.switch(p > 0., p, self.left_slope * p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
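# NumPy sketch of the leaky-rectifier activation that RectifiedLinear.fprop
# computes symbolically above; np.where plays the role of T.switch.
def _leaky_relu_sketch(z, left_slope=0.0):
    import numpy as np
    return np.where(z > 0., z, left_slope * z)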
class Softplus(Linear):
"""
An MLP layer using the softplus nonlinearity
h = log(1 + exp(Wx + b))
Parameters
----------
kwargs : dict
Keyword arguments to `Linear` constructor.
"""
def __init__(self, **kwargs):
super(Softplus, self).__init__(**kwargs)
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.softplus(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class SpaceConverter(Layer):
"""
A Layer with no parameters that converts the input from
one space to another.
Parameters
----------
layer_name : str
Name of the layer.
output_space : Space
The space to convert to.
"""
def __init__(self, layer_name, output_space):
super(SpaceConverter, self).__init__()
self.__dict__.update(locals())
del self.self
self._params = []
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.input_space.format_as(state_below, self.output_space)
class ConvNonlinearity(object):
"""
Abstract convolutional nonlinearity class.
"""
def apply(self, linear_response):
"""
Applies the nonlinearity over the convolutional layer.
Parameters
----------
linear_response: Variable
linear response of the layer.
Returns
-------
p: Variable
the response of the layer after the activation function
is applied over.
"""
p = linear_response
return p
def _get_monitoring_channels_for_activations(self, state):
"""
Computes the monitoring channels which does not require targets.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = OrderedDict({})
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
"""
Override the default get_monitoring_channels_from_state function.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
target : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
cost_fn : theano computational graph or None
This is the theano computational graph of a cost function.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = self._get_monitoring_channels_for_activations(state)
return rval
def cost(self, Y, Y_hat, batch_axis):
"""
The cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
batch_axis : integer
axis representing batch dimension
Returns
-------
cost : theano.gof.Variable
0-D tensor describing the cost
"""
raise NotImplementedError(
str(type(self)) + " does not implement cost function.")
class IdentityConvNonlinearity(ConvNonlinearity):
"""
Linear convolutional nonlinearity class.
"""
def __init__(self):
self.non_lin_name = "linear"
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self,
state,
target,
cost_fn=False):
rval = super(IdentityConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
prediction = T.gt(state, 0.5)
incorrect = T.neq(target, prediction).max(axis=1)
rval["misclass"] = T.cast(incorrect, config.floatX).mean()
return rval
@wraps(ConvNonlinearity.cost, append=True)
def cost(self, Y, Y_hat, batch_axis):
"""
Notes
-----
Mean squared error across examples in a batch
"""
return T.sum(T.mean(T.sqr(Y-Y_hat), axis=batch_axis))
class RectifierConvNonlinearity(ConvNonlinearity):
"""
A simple rectifier nonlinearity class for convolutional layers.
Parameters
----------
left_slope : float
The slope of the left half of the activation function.
"""
def __init__(self, left_slope=0.0):
"""
Parameters
----------
left_slope : float, optional
left slope for the linear response of the rectifier function.
default is 0.0.
"""
self.non_lin_name = "rectifier"
self.left_slope = left_slope
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the rectifier nonlinearity over the convolutional layer.
"""
p = linear_response * (linear_response > 0.) + self.left_slope *\
linear_response * (linear_response < 0.)
return p
class SigmoidConvNonlinearity(ConvNonlinearity):
"""
Sigmoid nonlinearity class for convolutional layers.
Parameters
----------
monitor_style : str, optional
default monitor_style is "classification".
This determines whether to do classification or detection.
"""
def __init__(self, monitor_style="classification"):
assert monitor_style in ['classification', 'detection']
self.monitor_style = monitor_style
self.non_lin_name = "sigmoid"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the sigmoid nonlinearity over the convolutional layer.
"""
p = T.nnet.sigmoid(linear_response)
return p
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
rval = super(SigmoidConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = cost_fn(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=[0, 1])
fp = ((1 - y) * y_hat).sum(axis=[0, 1])
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
@wraps(ConvNonlinearity.cost, append=True)
def cost(self, Y, Y_hat, batch_axis):
"""
Notes
-----
Cost mean across units, mean across batch of KL divergence
KL(P || Q) where P is defined by Y and Q is defined by Y_hat
KL(P || Q) = p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
"""
ave_total = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
ave = ave_total.mean()
return ave
class TanhConvNonlinearity(ConvNonlinearity):
"""
Tanh nonlinearity class for convolutional layers.
"""
def __init__(self):
self.non_lin_name = "tanh"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the tanh nonlinearity over the convolutional layer.
"""
p = T.tanh(linear_response)
return p
class ConvElemwise(Layer):
"""
Generic convolutional elemwise layer.
Takes the ConvNonlinearity object as an argument and implements
convolutional layer with the specified nonlinearity.
This function can implement:
* Linear convolutional layer
* Rectifier convolutional layer
* Sigmoid convolutional layer
* Tanh convolutional layer
based on the nonlinearity argument that it receives.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. A two-tuple of ints.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
nonlinearity : object
An instance of a nonlinearity object which might be inherited
from the ConvNonlinearity class.
irange : float, optional
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str, optional
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
sparse_init : WRITEME
include_prob : float, optional
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included, it is
initialized to 0.
init_bias : float, optional
All biases are initialized to this number. Default is 0.
W_lr_scale : float or None
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float or None
The learning rate on the biases for this layer is multiplied by this
scaling factor
max_kernel_norm : float or None
If specified, each kernel is constrained to have at most this norm.
pool_type : str or None
The type of the pooling operation performed after the convolution.
Default pooling type is max-pooling.
tied_b : bool, optional
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently. Default is true.
detector_normalization : callable or None
See `output_normalization`.
If the pooling argument is not provided, detector_normalization
is not applied to the layer.
output_normalization : callable or None
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
of the 3 points in processing:
- detector: the detector units can be normalized prior to the
spatial pooling
- output: the output of the layer, after spatial pooling, can
be normalized as well
kernel_stride : 2-tuple of ints, optional
The stride of the convolution kernel. Default is (1, 1).
"""
def __init__(self,
output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
max_kernel_norm=None,
pool_type=None,
pool_shape=None,
pool_stride=None,
tied_b=None,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise and not both.")
if pool_type is not None:
assert pool_shape is not None, (
"You should specify the shape of the spatial %s-pooling." %
pool_type)
assert pool_stride is not None, (
"You should specify the strides of the spatial %s-pooling." %
pool_type)
assert nonlinearity is not None
super(ConvElemwise, self).__init__()
self.nonlin = nonlinearity
self.__dict__.update(locals())
assert monitor_style in ['classification', 'detection'], (
"%s.monitor_style should be either "
"detection or classification" % self.__class__.__name__)
del self.self
if max_kernel_norm is not None:
self.extensions.append(
MaxL2FilterNorm(max_kernel_norm, axis=(1, 2, 3))
)
def initialize_transformer(self, rng):
"""
This function initializes the transformer of the class. Re-running
this function will reset the transformer.
Parameters
----------
rng : object
random number generator object.
"""
if self.irange is not None:
assert self.sparse_init is None
self.transformer = conv2d.make_random_conv2D(
irange=self.irange,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
elif self.sparse_init is not None:
self.transformer = conv2d.make_sparse_random_conv2D(
num_nonzero=self.sparse_init,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
else:
raise ValueError('irange and sparse_init cannot be both None')
def initialize_output_space(self):
"""
Initializes the output space of the ConvElemwise layer by taking
pooling operator and the hyperparameters of the convolutional layer
into consideration as well.
"""
dummy_batch_size = self.mlp.batch_size
if dummy_batch_size is None:
dummy_batch_size = 2
dummy_detector =\
sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
if self.pool_type is not None:
assert self.pool_type in ['max', 'mean']
if self.pool_type == 'max':
dummy_p = max_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
dummy_p = mean_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[2],
dummy_p.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
else:
dummy_detector = dummy_detector.eval()
self.output_space = Conv2DSpace(shape=[dummy_detector.shape[2],
dummy_detector.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
logger.info('Output space: {0}'.format(self.output_space.shape))
@wraps(Layer.set_input_space)
def set_input_space(self, space):
""" Note: this function will reset the parameters! """
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise BadInputSpaceError(self.__class__.__name__ +
".set_input_space "
"expected a Conv2DSpace, got " +
str(space) + " of type " +
str(type(space)))
rng = self.mlp.rng
if self.border_mode == 'valid':
output_shape = [int((self.input_space.shape[0]
- self.kernel_shape[0])
/ self.kernel_stride[0]) + 1,
int((self.input_space.shape[1]
- self.kernel_shape[1])
/ self.kernel_stride[1]) + 1]
elif self.border_mode == 'full':
output_shape = [int((self.input_space.shape[0]
+ self.kernel_shape[0])
/ self.kernel_stride[0]) - 1,
int((self.input_space.shape[1]
+ self.kernel_shape[1])
/ self.kernel_stride[1]) - 1]
self.detector_space = Conv2DSpace(shape=output_shape,
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
self.initialize_transformer(rng)
W, = self.transformer.get_params()
W.name = self.layer_name + '_W'
if self.tied_b:
self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
self.init_bias)
else:
self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
self.b.name = self.layer_name + '_b'
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info('Detector space: {0}'.format(self.detector_space.shape))
self.initialize_output_space()
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
outp, inp, rows, cols = range(4)
raw = self.transformer._filters.get_value()
return np.transpose(raw, (outp, rows, cols, inp))
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 4
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3)))
rval = OrderedDict([
('kernel_norms_min', row_norms.min()),
('kernel_norms_mean', row_norms.mean()),
('kernel_norms_max', row_norms.max()),
])
cst = self.cost
orval = self.nonlin.get_monitoring_channels_from_state(state,
targets,
cost_fn=cst)
rval.update(orval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
z = self.transformer.lmul(state_below)
if not hasattr(self, 'tied_b'):
self.tied_b = False
if self.tied_b:
b = self.b.dimshuffle('x', 0, 'x', 'x')
else:
b = self.b.dimshuffle('x', 0, 1, 2)
z = z + b
d = self.nonlin.apply(z)
if self.layer_name is not None:
d.name = self.layer_name + '_z'
self.detector_space.validate(d)
if self.pool_type is not None:
# Format the input to be supported by max pooling
if not hasattr(self, 'detector_normalization'):
self.detector_normalization = None
if self.detector_normalization:
d = self.detector_normalization(d)
assert self.pool_type in ['max', 'mean'], ("pool_type should be "
"either max or mean "
"pooling.")
if self.pool_type == 'max':
p = max_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
p = mean_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
self.output_space.validate(p)
else:
p = d
if not hasattr(self, 'output_normalization'):
self.output_normalization = None
if self.output_normalization:
p = self.output_normalization(p)
return p
@wraps(Layer.cost, append=True)
def cost(self, Y, Y_hat):
"""
Notes
-----
The cost method calls `self.nonlin.cost`
"""
batch_axis = self.output_space.get_batch_axis()
return self.nonlin.cost(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
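# Plain-Python sketch of the detector-space shape arithmetic used in
# ConvElemwise.set_input_space above, e.g.
# _conv_output_shape_sketch((32, 32), (5, 5), (1, 1), 'valid') -> [28, 28].
# The helper name is an assumption made for this example.
def _conv_output_shape_sketch(in_shape, kernel_shape, kernel_stride,
                              border_mode='valid'):
    if border_mode == 'valid':
        return [(i - k) // s + 1
                for i, k, s in zip(in_shape, kernel_shape, kernel_stride)]
    elif border_mode == 'full':
        return [(i + k) // s - 1
                for i, k, s in zip(in_shape, kernel_shape, kernel_stride)]
    raise ValueError("border_mode must be 'valid' or 'full'")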
class ConvRectifiedLinear(ConvElemwise):
"""
A convolutional rectified linear layer, based on theano's B01C
formatted convolution.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. A two-tuple of ints.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
irange : float
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
include_prob : float
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is initialized
to 0.
init_bias : float
All biases are initialized to this number
W_lr_scale : float
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float
The learning rate on the biases for this layer is multiplied by this
scaling factor
left_slope : float
The slope of the left half of the activation function
max_kernel_norm : float
If specified, each kernel is constrained to have at most this norm.
pool_type : str
The type of the pooling operation performed after the convolution.
Default pooling type is max-pooling.
tied_b : bool
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently.
detector_normalization : callable
See `output_normalization`
output_normalization : callable
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
of the 3 points in processing:
- detector: the rectifier units can be normalized prior to the
spatial pooling
- output: the output of the layer, after spatial pooling, can
be normalized as well
kernel_stride : tuple
The stride of the convolution kernel. A two-tuple of ints.
"""
def __init__(self,
output_channels,
kernel_shape,
pool_shape,
pool_stride,
layer_name,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
left_slope=0.0,
max_kernel_norm=None,
pool_type='max',
tied_b=False,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
nonlinearity = RectifierConvNonlinearity(left_slope)
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear and not both.")
# Alias the variables for pep8
mkn = max_kernel_norm
dn = detector_normalization
on = output_normalization
super(ConvRectifiedLinear, self).__init__(output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=irange,
border_mode=border_mode,
sparse_init=sparse_init,
include_prob=include_prob,
init_bias=init_bias,
W_lr_scale=W_lr_scale,
b_lr_scale=b_lr_scale,
pool_shape=pool_shape,
pool_stride=pool_stride,
max_kernel_norm=mkn,
pool_type=pool_type,
tied_b=tied_b,
detector_normalization=dn,
output_normalization=on,
kernel_stride=kernel_stride,
monitor_style=monitor_style)
def pool_dnn(bc01, pool_shape, pool_stride, mode='max'):
"""
cuDNN pooling op.
Parameters
----------
bc01 : theano tensor
Minibatch in format (batch size, channels, rows, cols).
pool_shape : tuple
Shape of the pool region (rows, cols).
pool_stride : tuple
Strides between pooling regions (row stride, col stride).
mode : str
Flag for `mean` or `max` pooling.
Returns
-------
mx : theano tensor
The output of pooling applied to `bc01`.
"""
assert mode in ['max', 'mean']
if mode == 'mean':
raise NotImplementedError('Mean pooling is not implemented '
'in Pylearn2 using cuDNN as of '
'January 19th, 2015.')
mx = dnn_pool(bc01, tuple(pool_shape), tuple(pool_stride), mode)
return mx
def max_pool(bc01, pool_shape, pool_stride, image_shape, try_dnn=True):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
avoid doing some of the arithmetic in theano
try_dnn : bool
Flag to set cuDNN use (default: True).
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool_c01b : Same functionality but with ('c', 0, 1, 'b') axes
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality as
`max_pool_c01b` but GPU-only and considerably faster.
mean_pool : Mean pooling instead of max pooling
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr <= r
assert pc <= c
name = bc01.name
if name is None:
name = 'anon_bc01'
if try_dnn and bc01.dtype == "float32":
use_dnn = dnn_available()
else:
use_dnn = False
if pool_shape == pool_stride and not use_dnn:
mx = max_pool_2d(bc01, pool_shape, False)
mx.name = 'max_pool(' + name + ')'
return mx
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
# Catch case where p_strd > p_shp causes pool
# to be set outside of im_shp.
if p_strd * rval >= im_shp:
rval -= 1
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
if (required_r > r) or (required_c > c):
small_r = min(required_r, r)
small_c = min(required_c, c)
assert bc01.dtype.startswith('float')
wide_infinity = T.alloc(T.constant(-np.inf, dtype=bc01.dtype),
bc01.shape[0],
bc01.shape[1],
required_r,
required_c)
bc01 = T.set_subtensor(wide_infinity[:, :, 0:small_r, 0:small_c],
bc01[:, :, 0:small_r, 0:small_c])
name = 'infinite_padded_' + name
if use_dnn:
mx = pool_dnn(bc01, pool_shape, pool_stride, 'max')
else:
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('max_pool_cur_' + name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + name + '_' +
str(row_within_pool) + '_' +
str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
def max_pool_c01b(c01b, pool_shape, pool_stride, image_shape):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
c01b : theano tensor
minibatch in format (channels, rows, cols, batch size)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
        (rows, cols) tuple to avoid doing some of the arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `c01b`
See Also
--------
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality but GPU-only
and considerably faster.
max_pool : Same functionality but with ('b', 0, 1, 'c') axes
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr > 0
assert pc > 0
assert pr <= r
assert pc <= c
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for c01bv in get_debug_values(c01b):
assert not contains_inf(c01bv)
assert c01bv.shape[1] == r
assert c01bv.shape[2] == c
wide_infinity = T.alloc(-np.inf,
c01b.shape[0],
required_r,
required_c,
c01b.shape[3])
name = c01b.name
if name is None:
name = 'anon_bc01'
c01b = T.set_subtensor(wide_infinity[:, 0:r, 0:c, :], c01b)
c01b.name = 'infinite_padded_' + name
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = c01b[:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs,
:]
cur.name = ('max_pool_cur_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
def mean_pool(bc01, pool_shape, pool_stride, image_shape):
"""
Does mean pooling (aka average pooling) via a Theano graph.
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
(rows, cols) tuple to avoid doing some arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool : Same thing but with max pooling
Examples
--------
>>> import theano
>>> import theano.tensor as T
>>> from pylearn2.models.mlp import mean_pool
>>> import numpy as np
>>> t = np.array([[1, 1, 3, 3],
... [1, 1, 3, 3],
... [5, 5, 7, 7],
... [5, 5, 7, 7],
... [9, 9, 11, 11],
... [9, 9, 11, 11]])
>>> X = np.zeros((3, t.shape[0], t.shape[1]))
>>> X[:] = t
>>> X = X[np.newaxis]
>>> X_sym = T.tensor4('X')
>>> pool_it = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
... image_shape=(6, 4))
>>> f = theano.function(inputs=[X_sym], outputs=pool_it)
    This will pool over windows of size (2, 2) while also stepping by this
    same amount, shrinking the example input to [[1, 3], [5, 7], [9, 11]].
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
wide_infinity = T.alloc(-np.inf,
bc01.shape[0],
bc01.shape[1],
required_r,
required_c)
name = bc01.name
if name is None:
name = 'anon_bc01'
bc01 = T.set_subtensor(wide_infinity[:, :, 0:r, 0:c], bc01)
bc01.name = 'infinite_padded_' + name
# Create a 'mask' used to keep count of the number of elements summed for
# each position
wide_infinity_count = T.alloc(0, bc01.shape[0], bc01.shape[1], required_r,
required_c)
bc01_count = T.set_subtensor(wide_infinity_count[:, :, 0:r, 0:c], 1)
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('mean_pool_cur_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
cur_count = bc01_count[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
if mx is None:
mx = cur
count = cur_count
else:
mx = mx + cur
count = count + cur_count
mx.name = ('mean_pool_mx_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx /= count
mx.name = 'mean_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
@wraps(_WD)
def WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _WD(*args, **kwargs)
@wraps(_L1WD)
def L1WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.L1WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _L1WD(*args, **kwargs)
class LinearGaussian(Linear):
"""
A Linear layer augmented with a precision vector, for modeling
conditionally Gaussian data.
    Specifically, given an input x, this layer models the distribution over
the output as
y ~ p(y | x) = N(y | Wx + b, beta^-1)
i.e., y is conditionally Gaussian with mean Wx + b and variance
beta^-1.
beta is a diagonal precision matrix so beta^-1 is a diagonal covariance
matrix.
Internally, beta is stored as the vector of diagonal values on this
matrix.
Since the output covariance is not a function of the input, this does
not provide an example-specific estimate of the error in the mean.
However, the vector-valued beta does mean that maximizing log p(y | x)
will reweight the mean squared error so that variables that can be
    estimated more easily receive a higher penalty. This is one way of
    adapting the model better to heterogeneous data.
Parameters
----------
init_beta : float or ndarray
Any value > 0 that can be broadcasted to a vector of shape (dim, ).
The elements of beta are initialized to this value.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
min_beta : float
The elements of beta are constrained to be >= this value.
This value must be > 0., otherwise the output conditional is not
constrained to be a valid probability distribution.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
A trained model should always be able to obtain at least this much
precision, at least on the training set.
max_beta : float
The elements of beta are constrained to be <= this value.
We impose this constraint because for problems
where the training set values can be predicted
exactly, beta can grow without bound, which also makes the
gradients grow without bound, resulting in numerical problems.
kwargs : dict
Arguments to the `Linear` superclass.
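    Examples
    --------
    A minimal construction sketch (the numeric values below are illustrative
    assumptions rather than recommended settings; `beta_from_targets` is a
    more principled source for `init_beta` and `min_beta`):
    >>> layer = LinearGaussian(init_beta=1., min_beta=1e-6, max_beta=1e6,
    ...                        beta_lr_scale=None, dim=10, layer_name='y',
    ...                        irange=0.05)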
"""
def __init__(self, init_beta, min_beta, max_beta, beta_lr_scale, **kwargs):
super(LinearGaussian, self).__init__(**kwargs)
self.__dict__.update(locals())
del self.self
del self.kwargs
@wraps(Layer.set_input_space)
def set_input_space(self, space):
super(LinearGaussian, self).set_input_space(space)
assert isinstance(self.output_space, VectorSpace)
self.beta = sharedX(self.output_space.get_origin() + self.init_beta,
'beta')
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(LinearGaussian,
self).get_layer_monitoring_channels(state_below,
state,
targets)
assert isinstance(rval, OrderedDict)
rval['beta_min'] = self.beta.min()
rval['beta_mean'] = self.beta.mean()
rval['beta_max'] = self.beta.max()
if targets:
rval['mse'] = T.sqr(state - targets).mean()
return rval
@wraps(Linear.cost)
def cost(self, Y, Y_hat):
return (0.5 * T.dot(T.sqr(Y - Y_hat), self.beta).mean() -
0.5 * T.log(self.beta).sum())
@wraps(Linear.cost_matrix)
def cost_matrix(self, Y, Y_hat):
return 0.5 * T.sqr(Y - Y_hat) * self.beta - 0.5 * T.log(self.beta)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
super(LinearGaussian, self)._modify_updates(updates)
if self.beta in updates:
updates[self.beta] = T.clip(updates[self.beta],
self.min_beta,
self.max_beta)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = super(LinearGaussian, self).get_lr_scalers()
if self.beta_lr_scale is not None:
rval[self.beta] = self.beta_lr_scale
return rval
@wraps(Layer.get_params)
def get_params(self):
return super(LinearGaussian, self).get_params() + [self.beta]
def beta_from_design(design, min_var=1e-6, max_var=1e6):
"""
Returns the marginal precision of a design matrix.
Parameters
----------
design : ndarray
A numpy ndarray containing a design matrix
min_var : float
max_var : float
All variances are constrained to lie in the range [min_var, max_var]
to avoid numerical issues like infinite precision.
Returns
-------
beta : ndarray
A 1D vector containing the marginal precision of each variable in the
design matrix.
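    Examples
    --------
    A small worked sketch: the two columns below have variances 1 and 4,
    so their marginal precisions are 1 and 0.25.
    >>> import numpy as np
    >>> design = np.array([[0., 0.], [2., 4.]])
    >>> beta = beta_from_design(design)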
"""
return 1. / np.clip(design.var(axis=0), min_var, max_var)
def beta_from_targets(dataset, **kwargs):
"""
Returns the marginal precision of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
A DenseDesignMatrix with a targets field `y`
kwargs : dict
Extra arguments to `beta_from_design`
Returns
-------
beta : ndarray
A 1-D vector containing the marginal precision of the *targets* in
`dataset`.
"""
return beta_from_design(dataset.y, **kwargs)
def beta_from_features(dataset, **kwargs):
"""
Returns the marginal precision of the features in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
The dataset to compute the precision on.
kwargs : dict
Passed through to `beta_from_design`
Returns
-------
beta : ndarray
Vector of precision values for each feature in `dataset`
"""
return beta_from_design(dataset.X, **kwargs)
def mean_of_targets(dataset):
"""
Returns the mean of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
Returns
-------
mn : ndarray
A 1-D vector with entry i giving the mean of target i
"""
return dataset.y.mean(axis=0)
class PretrainedLayer(Layer):
"""
A layer whose weights are initialized, and optionally fixed,
based on prior training.
Parameters
----------
layer_content : Model
Should implement "upward_pass" (RBM and Autoencoder do this)
    freeze_params : bool
        If True, regard layer_content's parameters as fixed.
If False, they become parameters of this layer and can be
fine-tuned to optimize the MLP's cost function.
"""
def __init__(self, layer_name, layer_content, freeze_params=False):
super(PretrainedLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@wraps(Layer.set_input_space)
def set_input_space(self, space):
assert self.get_input_space() == space
@wraps(Layer.get_params)
def get_params(self):
if self.freeze_params:
return []
return self.layer_content.get_params()
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.layer_content.get_input_space()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layer_content.get_output_space()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
return OrderedDict([])
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.layer_content.upward_pass(state_below)
class CompositeLayer(Layer):
"""
A Layer that runs several layers in parallel. Its default behavior
is to pass the layer's input to each of the components.
Alternatively, it can take a CompositeSpace as an input and a mapping
from inputs to layers i.e. providing each component layer with a
subset of the inputs.
Parameters
----------
layer_name : str
The name of this layer
layers : tuple or list
The component layers to run in parallel.
inputs_to_layers : dict mapping int to list of ints, optional
Can only be used if the input space is a CompositeSpace.
If inputs_to_layers[i] contains j, it means input i will
be given as input to component j. Note that if multiple inputs are
passed on to e.g. an inner CompositeLayer, the same order will
be maintained. If the list is empty, the input will be discarded.
If an input does not appear in the dictionary, it will be given to
all components.
Examples
--------
>>> composite_layer = CompositeLayer(
... layer_name='composite_layer',
... layers=[Tanh(7, 'h0', 0.1), Sigmoid(5, 'h1', 0.1)],
... inputs_to_layers={
... 0: [1],
... 1: [0]
... })
This CompositeLayer has a CompositeSpace with 2 subspaces as its
input space. The first input is given to the Sigmoid layer, the second
input is given to the Tanh layer.
>>> wrapper_layer = CompositeLayer(
... layer_name='wrapper_layer',
... layers=[Linear(9, 'h2', 0.1),
... composite_layer,
... Tanh(7, 'h3', 0.1)],
... inputs_to_layers={
... 0: [1],
... 2: []
... })
This CompositeLayer takes 3 inputs. The first one is given to the
inner CompositeLayer. The second input is passed on to each component
layer i.e. to the Tanh, Linear as well as CompositeLayer. The third
    input is discarded. Note that the inner CompositeLayer will receive
the inputs with the same ordering i.e. [0, 1], and never [1, 0].
"""
def __init__(self, layer_name, layers, inputs_to_layers=None):
self.num_layers = len(layers)
if inputs_to_layers is not None:
if not isinstance(inputs_to_layers, dict):
raise TypeError("CompositeLayer expected inputs_to_layers to "
"be dict, got " + str(type(inputs_to_layers)))
self.inputs_to_layers = OrderedDict()
for key in sorted(inputs_to_layers):
assert isinstance(key, py_integer_types)
value = inputs_to_layers[key]
assert is_iterable(value)
assert all(isinstance(v, py_integer_types) for v in value)
# Check 'not value' to support case of empty list
assert not value or all(0 <= v < self.num_layers
for v in value)
self.inputs_to_layers[key] = sorted(value)
super(CompositeLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@property
def routing_needed(self):
return self.inputs_to_layers is not None
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if not isinstance(space, CompositeSpace):
if self.inputs_to_layers is not None:
raise ValueError("CompositeLayer received an inputs_to_layers "
"mapping, but does not have a CompositeSpace "
"as its input space, so there is nothing to "
"map. Received " + str(space) + " as input "
"space.")
elif self.routing_needed:
if not max(self.inputs_to_layers) < len(space.components):
raise ValueError("The inputs_to_layers mapping of "
"CompositeSpace contains they key " +
str(max(self.inputs_to_layers)) + " "
"(0-based) but the input space only "
"contains " + str(self.num_layers) + " "
"layers.")
# Invert the dictionary
self.layers_to_inputs = OrderedDict()
for i in xrange(self.num_layers):
inputs = []
for j in xrange(len(space.components)):
if j in self.inputs_to_layers:
if i in self.inputs_to_layers[j]:
inputs.append(j)
else:
inputs.append(j)
self.layers_to_inputs[i] = inputs
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_space = space.restrict(self.layers_to_inputs[i])
else:
cur_space = space
layer.set_input_space(cur_space)
self.input_space = space
self.output_space = CompositeSpace(tuple(layer.get_output_space()
for layer in self.layers))
self._target_space = CompositeSpace(tuple(layer.get_target_space()
for layer in self.layers))
@wraps(Layer.get_params)
def get_params(self):
rval = []
for layer in self.layers:
rval = safe_union(layer.get_params(), rval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
rvals = []
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
rvals.append(layer.fprop(cur_state_below))
return tuple(rvals)
def _weight_decay_aggregate(self, method_name, coeff):
if isinstance(coeff, py_float_types):
return T.sum([getattr(layer, method_name)(coeff)
for layer in self.layers])
elif is_iterable(coeff):
assert all(layer_coeff >= 0 for layer_coeff in coeff)
return T.sum([getattr(layer, method_name)(layer_coeff) for
layer, layer_coeff in safe_zip(self.layers, coeff)
if layer_coeff > 0], dtype=config.floatX)
else:
raise TypeError("CompositeLayer's " + method_name + " received "
"coefficients of type " + str(type(coeff)) + " "
"but must be provided with a float or list/tuple")
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights,
which is the weighted sum of the squared L2 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the squared L2 weight decay penalty for
this layer. If a single value is provided, this coefficient is
used for each component layer. If a list of tuple of
coefficients is given they are passed on to the component
layers in the given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the squared L2 weight decay penalty term for
this layer.
"""
return self._weight_decay_aggregate('get_weight_decay', coeff)
def get_l1_weight_decay(self, coeff):
"""
Provides an expression for a squared L1 penalty on the weights,
which is the weighted sum of the squared L1 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the L1 weight decay penalty for this layer.
If a single value is provided, this coefficient is used for
each component layer. If a list of tuple of coefficients is
given they are passed on to the component layers in the
given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
return self._weight_decay_aggregate('get_l1_weight_decay', coeff)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return sum(layer.cost(Y_elem, Y_hat_elem)
for layer, Y_elem, Y_hat_elem in
safe_zip(self.layers, Y, Y_hat))
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(CompositeLayer, self).set_mlp(mlp)
for layer in self.layers:
layer.set_mlp(mlp)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
# TODO: reduce redundancy with fprop method
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
if state is not None:
cur_state = state[i]
else:
cur_state = None
if targets is not None:
cur_targets = targets[i]
else:
cur_targets = None
d = layer.get_layer_monitoring_channels(
cur_state_below, cur_state, cur_targets)
for key in d:
rval[layer.layer_name + '_' + key] = d[key]
return rval
@wraps(Model._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
class FlattenerLayer(Layer):
"""
A wrapper around a different layer that flattens
the original layer's output.
The cost works by unflattening the target and then
calling the wrapped Layer's cost.
This is mostly intended for use with CompositeLayer as the wrapped
Layer, and is mostly useful as a workaround for theano not having
a TupleVariable with which to represent a composite target.
There are obvious memory, performance, and readability issues with doing
this, so really it would be better for theano to support TupleTypes.
See pylearn2.sandbox.tuple_var and the theano-dev e-mail thread
"TupleType".
Parameters
----------
raw_layer : Layer
Layer that FlattenerLayer wraps.
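    Examples
    --------
    A minimal sketch wrapping a CompositeLayer so that its tuple output is
    exposed as a single flat vector (the layer sizes are illustrative):
    >>> flat = FlattenerLayer(CompositeLayer('parallel',
    ...                                      [Linear(3, 'h0', irange=0.1),
    ...                                       Linear(2, 'h1', irange=0.1)]))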
"""
def __init__(self, raw_layer):
super(FlattenerLayer, self).__init__()
self.__dict__.update(locals())
del self.self
self.layer_name = raw_layer.layer_name
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.raw_layer.set_input_space(space)
total_dim = self.raw_layer.get_output_space().get_total_dimension()
self.output_space = VectorSpace(total_dim)
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.raw_layer.get_input_space()
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
return self.raw_layer.get_monitoring_channels(data)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
raw_space = self.raw_layer.get_output_space()
state = raw_space.undo_format_as(state,
self.get_output_space())
if targets is not None:
targets = self.get_target_space().format_as(
targets, self.raw_layer.get_target_space())
return self.raw_layer.get_layer_monitoring_channels(
state_below=state_below,
state=state,
targets=targets
)
@wraps(Layer.get_monitoring_data_specs)
def get_monitoring_data_specs(self):
return self.raw_layer.get_monitoring_data_specs()
@wraps(Layer.get_params)
def get_params(self):
return self.raw_layer.get_params()
@wraps(Layer.get_weights)
def get_weights(self):
return self.raw_layer.get_weights()
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
return self.raw_layer.get_weight_decay(coeffs)
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
return self.raw_layer.get_l1_weight_decay(coeffs)
@wraps(Layer.set_batch_size)
def set_batch_size(self, batch_size):
self.raw_layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
self.raw_layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return self.raw_layer.get_lr_scalers()
@wraps(Layer.fprop)
def fprop(self, state_below):
raw = self.raw_layer.fprop(state_below)
return self.raw_layer.get_output_space().format_as(raw,
self.output_space)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
raw_space = self.raw_layer.get_output_space()
target_space = self.output_space
raw_Y = target_space.format_as(Y, raw_space)
raw_Y_hat = raw_space.undo_format_as(Y_hat, target_space)
raw_space.validate(raw_Y_hat)
return self.raw_layer.cost(raw_Y, raw_Y_hat)
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(FlattenerLayer, self).set_mlp(mlp)
self.raw_layer.set_mlp(mlp)
class WindowLayer(Layer):
"""
Layer used to select a window of an image input.
The input of the layer must be Conv2DSpace.
Parameters
----------
layer_name : str
A name for this layer.
window : tuple
A four-tuple of ints indicating respectively
the top left x and y position, and
the bottom right x and y position of the window.
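    Examples
    --------
    A construction sketch selecting a 24x24 window anchored at the top left
    corner (the coordinates are illustrative):
    >>> window = WindowLayer('crop', window=(0, 0, 23, 23))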
"""
def __init__(self, layer_name, window):
super(WindowLayer, self).__init__()
self.__dict__.update(locals())
del self.self
if window[0] < 0 or window[0] > window[2] or \
window[1] < 0 or window[1] > window[3]:
raise ValueError("WindowLayer: bad window parameter")
@wraps(Layer.fprop)
def fprop(self, state_below):
extracts = [slice(None), slice(None), slice(None), slice(None)]
extracts[self.rows] = slice(self.window[0], self.window[2] + 1)
extracts[self.cols] = slice(self.window[1], self.window[3] + 1)
extracts = tuple(extracts)
return state_below[extracts]
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise TypeError("The input to a Window layer should be a "
"Conv2DSpace, but layer " + self.layer_name +
" got " + str(type(self.input_space)))
axes = space.axes
self.rows = axes.index(0)
self.cols = axes.index(1)
nrows = space.shape[0]
ncols = space.shape[1]
if self.window[2] + 1 > nrows or self.window[3] + 1 > ncols:
raise ValueError("WindowLayer: bad window shape. "
"Input is [" + str(nrows) + ", " +
str(ncols) + "], "
"but layer " + self.layer_name + " has window "
+ str(self.window))
self.output_space = Conv2DSpace(
shape=[self.window[2] - self.window[0] + 1,
self.window[3] - self.window[1] + 1],
num_channels=space.num_channels,
axes=axes)
@wraps(Layer.get_params)
def get_params(self):
return []
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self):
return []
def generate_dropout_mask(mlp, default_include_prob=0.5,
input_include_probs=None, rng=(2013, 5, 17)):
"""
Generate a dropout mask (as an integer) given inclusion
probabilities.
Parameters
----------
mlp : object
An MLP object.
default_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
        in which case `default_include_prob` is used for all
layers.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
Returns
-------
mask : int
An integer indexing a dropout mask for the network,
drawn with the appropriate probability given the
inclusion probabilities.
"""
if input_include_probs is None:
input_include_probs = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
total_units = 0
mask = 0
for layer in mlp.layers:
if layer.layer_name in input_include_probs:
p = input_include_probs[layer.layer_name]
else:
p = default_include_prob
for _ in xrange(layer.get_input_space().get_total_dimension()):
mask |= int(rng.uniform() < p) << total_units
total_units += 1
return mask
def sampled_dropout_average(mlp, inputs, num_masks,
default_input_include_prob=0.5,
input_include_probs=None,
default_input_scale=2.,
input_scales=None,
rng=(2013, 5, 17),
per_example=False):
"""
Take the geometric mean over a number of randomly sampled
dropout masks for an MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
num_masks : int
The number of masks to sample.
default_input_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
        in which case `default_input_include_prob` is used for all
layers.
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
per_example : bool, optional
If `True`, generate a different mask for every single
        test example, so you have `num_masks` masks per example
        instead of `num_masks` networks in total. If `False`,
`num_masks` masks are fixed in the graph.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction of
all the networks.
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
mlp._validate_layer_names(list(input_include_probs.keys()))
mlp._validate_layer_names(list(input_scales.keys()))
if per_example:
outputs = [mlp.dropout_fprop(inputs, default_input_include_prob,
input_include_probs,
default_input_scale,
input_scales)
for _ in xrange(num_masks)]
else:
masks = [generate_dropout_mask(mlp, default_input_include_prob,
input_include_probs, rng)
for _ in xrange(num_masks)]
outputs = [mlp.masked_fprop(inputs, mask, None,
default_input_scale, input_scales)
for mask in masks]
return geometric_mean_prediction(outputs)
def exhaustive_dropout_average(mlp, inputs, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Take the geometric mean over all dropout masks of an
MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
masked_input_layers : list, optional
A list of layer names whose input should be masked.
Default is all layers (including the first hidden
layer, i.e. mask the input).
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction
of all exponentially many masked subnetworks.
Notes
-----
This is obviously exponential in the size of the network,
don't do this except for tiny toy networks.
"""
if masked_input_layers is None:
masked_input_layers = mlp.layer_names
mlp._validate_layer_names(masked_input_layers)
if input_scales is None:
input_scales = {}
mlp._validate_layer_names(input_scales.keys())
if any(key not in masked_input_layers for key in input_scales):
not_in = [key for key in input_scales
                  if key not in masked_input_layers]
raise ValueError(", ".join(not_in) + " in input_scales"
" but not masked")
num_inputs = mlp.get_total_input_dimension(masked_input_layers)
outputs = [mlp.masked_fprop(inputs, mask, masked_input_layers,
default_input_scale, input_scales)
for mask in xrange(2 ** num_inputs)]
return geometric_mean_prediction(outputs)
def geometric_mean_prediction(forward_props):
"""
    Take the geometric mean of a collection of softmax outputs, e.g. forward
    propagations of an MLP under different dropout masks.
Parameters
----------
forward_props : list
A list of Theano graphs corresponding to forward
propagations through the network with different
dropout masks.
Returns
-------
geo_mean : tensor_like
        A symbolic graph for the renormalized geometric mean prediction
        of the given forward propagations.
    Notes
    -----
    The averaging is done in the pre-softmax space: the softmax inputs are
    averaged and the softmax is re-applied, which yields the renormalized
    geometric mean of the individual predictions.
"""
presoftmax = []
for out in forward_props:
assert isinstance(out.owner.op, T.nnet.Softmax)
assert len(out.owner.inputs) == 1
presoftmax.append(out.owner.inputs[0])
average = reduce(operator.add, presoftmax) / float(len(presoftmax))
return T.nnet.softmax(average)
class BadInputSpaceError(TypeError):
"""
An error raised by an MLP layer when set_input_space is given an
object that is not one of the Spaces that layer supports.
"""
def get_lr_scalers_from_layers(owner):
"""
Get the learning rate scalers for all member layers of
`owner`.
Parameters
----------
owner : Model
Any Model with a `layers` field
Returns
-------
lr_scalers : OrderedDict
A dictionary mapping parameters of `owner` to learning
rate scalers.
"""
rval = OrderedDict()
params = owner.get_params()
for layer in owner.layers:
contrib = layer.get_lr_scalers()
assert isinstance(contrib, OrderedDict)
# No two layers can contend to scale a parameter
assert not any([key in rval for key in contrib])
# Don't try to scale anything that's not a parameter
assert all([key in params for key in contrib])
rval.update(contrib)
assert all([isinstance(val, float) for val in rval.values()])
return rval
| bsd-3-clause |
forging2012/taiga-back | taiga/mdrender/extensions/semi_sane_lists.py | 25 | 1281 | # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import markdown
class SemiSaneOListProcessor(markdown.blockprocessors.OListProcessor):
SIBLING_TAGS = ['ol']
class SemiSaneUListProcessor(markdown.blockprocessors.UListProcessor):
SIBLING_TAGS = ['ul']
class SemiSaneListExtension(markdown.Extension):
"""An extension that causes lists to be treated the same way GitHub does.
Like the sane_lists extension, GitHub considers a list to end if it's
separated by multiple newlines from another list of a different type. Unlike
the sane_lists extension, GitHub will mix list types if they're not
separated by multiple newlines.
GitHub also recognizes lists that start in the middle of a paragraph. This
is currently not supported by this extension, since the Python parser has a
deeply-ingrained belief that blocks are always separated by multiple
newlines.
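    A usage sketch (assuming a Python-Markdown version that accepts extension
    instances in the extensions argument, as the 2.x series does):
        markdown.markdown(text, extensions=[SemiSaneListExtension()])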
"""
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors['olist'] = SemiSaneOListProcessor(md.parser)
md.parser.blockprocessors['ulist'] = SemiSaneUListProcessor(md.parser)
| agpl-3.0 |
hamzehd/edx-platform | cms/djangoapps/contentstore/management/commands/prompt.py | 204 | 1122 | import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {
"yes": True,
"y": True,
"ye": True,
"no": False,
"n": False,
}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
| agpl-3.0 |
staranjeet/fjord | fjord/suggest/tests/test_dummy.py | 1 | 2216 | import datetime
from fjord.base.tests import TestCase
from fjord.feedback.tests import ResponseFactory
from fjord.suggest import get_suggesters
from fjord.suggest.utils import get_suggestions
from fjord.suggest.providers.dummy import DummySuggester
from fjord.suggest.tests import SuggesterTestMixin
class DummySuggesterLoadingTestCase(SuggesterTestMixin, TestCase):
suggesters = []
def test_didnt_load(self):
dummy_providers = [
prov for prov in get_suggesters()
if isinstance(prov, DummySuggester)
]
assert len(dummy_providers) == 0
class DummySuggesterTestCase(SuggesterTestMixin, TestCase):
suggesters = [
'fjord.suggest.providers.dummy.DummySuggester'
]
def test_load(self):
dummy_providers = [
prov for prov in get_suggesters()
if isinstance(prov, DummySuggester)
]
assert len(dummy_providers) == 1
def test_get_suggestions(self):
now = u'ts_{0}'.format(datetime.datetime.now())
req = self.get_feedback_post_request({
'happy': 1,
'description': now,
'url': u'http://example.com/{0}'.format(now)
})
feedback = ResponseFactory(
happy=True,
description=now,
url=u'http://example.com/{0}'.format(now)
)
# Try with just the feedback
links = get_suggestions(feedback)
assert len(links) == 1
assert links[0].provider == 'dummy'
assert links[0].provider_version == 1
assert links[0].cssclass == u'document'
assert links[0].summary == u'summary {0}'.format(now)
assert links[0].description == u'description {0}'.format(now)
assert links[0].url == feedback.url
# Now with the feedback and request
links = get_suggestions(feedback, req)
assert len(links) == 1
assert links[0].provider == 'dummy'
assert links[0].provider_version == 1
assert links[0].cssclass == u'document'
assert links[0].summary == u'summary {0}'.format(now)
assert links[0].description == u'description {0}'.format(now)
assert links[0].url == feedback.url
| bsd-3-clause |
sorenh/cc | vendor/boto/boto/pyami/installers/ubuntu/mysql.py | 50 | 4876 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This installer will install mysql-server on an Ubuntu machine.
In addition to the normal installation done by apt-get, it will
also configure the new MySQL server to store its data files in
a different location. By default, this is /mnt but that can be
configured in the [MySQL] section of the boto config file passed
to the instance.
"""
from boto.pyami.installers.ubuntu.installer import Installer
import os
import boto
from boto.utils import ShellCommand
from ConfigParser import SafeConfigParser
import time
ConfigSection = """
[MySQL]
root_password = <will be used as MySQL root password, default none>
data_dir = <new data dir for MySQL, default is /mnt>
"""
class MySQL(Installer):
def install(self):
self.run('apt-get update')
self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)
# def set_root_password(self, password=None):
# if not password:
# password = boto.config.get('MySQL', 'root_password')
# if password:
# self.run('mysqladmin -u root password %s' % password)
# return password
def change_data_dir(self, password=None):
data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
fresh_install = False;
is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running
is_mysql_running_command.run()
if is_mysql_running_command.getStatus() == 0:
# mysql is running. This is the state apt-get will leave it in. If it isn't running,
# that means mysql was already installed on the AMI and there's no need to stop it,
# saving 40 seconds on instance startup.
time.sleep(10) #trying to stop mysql immediately after installing it fails
# We need to wait until mysql creates the root account before we kill it
# or bad things will happen
i = 0
while self.run("echo 'quit' | mysql -u root") != 0 and i<5:
time.sleep(5)
i = i + 1
self.run('/etc/init.d/mysql stop')
self.run("pkill -9 mysql")
mysql_path = os.path.join(data_dir, 'mysql')
if not os.path.exists(mysql_path):
self.run('mkdir %s' % mysql_path)
fresh_install = True;
self.run('chown -R mysql:mysql %s' % mysql_path)
fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
fp.write('# created by pyami\n')
fp.write('# use the %s volume for data\n' % data_dir)
fp.write('[mysqld]\n')
fp.write('datadir = %s\n' % mysql_path)
fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
fp.close()
if fresh_install:
self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
self.start('mysql')
else:
#get the password ubuntu expects to use:
config_parser = SafeConfigParser()
config_parser.read('/etc/mysql/debian.cnf')
password = config_parser.get('client', 'password')
# start the mysql deamon, then mysql with the required grant statement piped into it:
self.start('mysql')
time.sleep(10) #time for mysql to start
grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
while self.run(grant_command) != 0:
time.sleep(5)
# leave mysqld running
def main(self):
self.install()
        # change_data_dir runs 'mysql -u root' which assumes there is no mysql password,
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir()
| apache-2.0 |
mogoweb/chromium-crosswalk | chrome/test/functional/test_clean_exit.py | 69 | 1495 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import signal
import subprocess
import tempfile
import unittest
import pyauto_functional
import pyauto
import test_utils
class SimpleTest(pyauto.PyUITest):
def ExtraChromeFlags(self):
"""Ensures Chrome is launched with custom flags.
Returns:
A list of extra flags to pass to Chrome when it is launched.
"""
fd, self._strace_log = tempfile.mkstemp()
os.close(fd)
extra_flags = ['--no-sandbox', '--child-clean-exit',
'--renderer-cmd-prefix=/usr/bin/strace -o %s' %
self._strace_log]
logging.debug('Strace file is: %s' % self._strace_log)
return pyauto.PyUITest.ExtraChromeFlags(self) + extra_flags
def testCleanExit(self):
"""Ensures the renderer process cleanly exits."""
url = self.GetHttpURLForDataPath('title2.html')
self.NavigateToURL(url)
os.kill(self.GetBrowserInfo()['browser_pid'], signal.SIGINT)
self.WaitUntil(lambda: self._IsFileOpen(self._strace_log))
strace_contents = open(self._strace_log).read()
self.assertTrue('exit_group' in strace_contents)
os.remove(self._strace_log)
def _IsFileOpen(self, filename):
p = subprocess.Popen(['lsof', filename])
return p.communicate()[0] == ''
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
Spondoolies-Tech/kernel | tools/perf/tests/attr.py | 105 | 9117 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.info(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
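#
# A hypothetical sketch of such a file (the section and field names below
# are illustrative only, not taken from the real perf test suite):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   fd = 1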
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.info(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing to load the 'parent
# event' first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" running '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.info(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.info(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.info(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.info(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.info(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
ahmadassaf/zulip | zerver/management/commands/rename_stream.py | 11 | 1417 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_rename_stream
from zerver.lib.str_utils import force_text
from zerver.models import Realm, get_realm
import sys
class Command(BaseCommand):
help = """Change the stream name for a realm."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('domain', metavar='<domain>', type=str,
help="domain to operate on")
parser.add_argument('old_name', metavar='<old name>', type=str,
help='name of stream to be renamed')
parser.add_argument('new_name', metavar='<new name>', type=str,
help='new name to rename the stream to')
def handle(self, *args, **options):
# type: (*Any, **str) -> None
domain = options['domain']
old_name = options['old_name']
new_name = options['new_name']
encoding = sys.getfilesystemencoding()
realm = get_realm(force_text(domain, encoding))
if realm is None:
print("Unknown domain %s" % (domain,))
exit(1)
do_rename_stream(realm, force_text(old_name, encoding),
force_text(new_name, encoding))
| apache-2.0 |
kimiscircle/fdfs_client-py | setup.py | 2 | 1133 | #!/usr/bin/env python
import os
from fdfs_client import __version__
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
f = open(os.path.join(os.path.dirname(__file__), 'README.md'))
long_description = f.read()
f.close()
sdict = {
'name': 'fdfs_client-py',
'version': __version__,
'description': 'Python client for Fastdfs ver 4.06',
'long_description': long_description,
'author': 'scott yuan',
'author_email': '[email protected]',
'maintainer': 'scott yuan',
'maintainer_email': '[email protected]',
'keywords': ['Fastdfs', 'Distribute File System'],
'license': 'GPLV3',
'packages': ['fdfs_client'],
'classifiers': [
'Development Status :: 1 - Production/Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: GPLV3',
'Operating System :: OS Independent',
'Programming Language :: Python'],
'ext_modules': [Extension('fdfs_client.sendfile',
sources=['fdfs_client/sendfilemodule.c'])],
}
setup(**sdict)
| gpl-3.0 |
atomicjets/twitter-for-bigquery | libs/config.py | 6 | 56933 | # Copyright 2004-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This is a configuration module for Python.
This module should work under Python versions >= 2.2, and cannot be used with
earlier versions since it uses new-style classes.
Development and testing have only been carried out (so far) on Python 2.3.4 and
Python 2.4.2. See the test module (test_config.py) included in the
U{distribution<http://www.red-dove.com/python_config.html|_blank>} (follow the
download link).
A simple example - with the example configuration file::
messages:
[
{
stream : `sys.stderr`
message: 'Welcome'
name: 'Harry'
}
{
stream : `sys.stdout`
message: 'Welkom'
name: 'Ruud'
}
{
stream : $messages[0].stream
message: 'Bienvenue'
name: Yves
}
]
a program to read the configuration would be::
from config import Config
f = file('simple.cfg')
cfg = Config(f)
for m in cfg.messages:
s = '%s, %s' % (m.message, m.name)
try:
print >> m.stream, s
except IOError, e:
print e
which, when run, would yield the console output::
Welcome, Harry
Welkom, Ruud
Bienvenue, Yves
See U{this tutorial<http://www.red-dove.com/python_config.html|_blank>} for more
information.
@version: 0.3.9
@author: Vinay Sajip
@copyright: Copyright (C) 2004-2010 Vinay Sajip. All Rights Reserved.
@var streamOpener: The default stream opener. This is a factory function which
takes a string (e.g. filename) and returns a stream suitable for reading. If
unable to open the stream, an IOError exception should be thrown.
The default value of this variable is L{defaultStreamOpener}. For an example
of how it's used, see test_config.py (search for streamOpener).
"""
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "alpha"
__version__ = "0.3.9"
__date__ = "11 May 2010"
from types import StringType, UnicodeType
import codecs
import logging
import os
import sys
WORD = 'a'
NUMBER = '9'
STRING = '"'
EOF = ''
LCURLY = '{'
RCURLY = '}'
LBRACK = '['
LBRACK2 = 'a['
RBRACK = ']'
LPAREN = '('
LPAREN2 = '(('
RPAREN = ')'
DOT = '.'
COMMA = ','
COLON = ':'
AT = '@'
PLUS = '+'
MINUS = '-'
STAR = '*'
SLASH = '/'
MOD = '%'
BACKTICK = '`'
DOLLAR = '$'
TRUE = 'True'
FALSE = 'False'
NONE = 'None'
WORDCHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"
if sys.platform == 'win32':
NEWLINE = '\r\n'
elif os.name == 'mac':
NEWLINE = '\r'
else:
NEWLINE = '\n'
try:
import encodings.utf_32
has_utf32 = True
except:
has_utf32 = False
try:
from logging.handlers import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
class ConfigInputStream(object):
"""
An input stream which can read either ANSI files with default encoding
or Unicode files with BOMs.
Handles UTF-8, UTF-16LE, UTF-16BE. Could handle UTF-32 if Python had
built-in support.
"""
def __init__(self, stream):
"""
Initialize an instance.
@param stream: The underlying stream to be read. Should be seekable.
@type stream: A stream (file-like object).
"""
encoding = None
signature = stream.read(4)
used = -1
if has_utf32:
if signature == codecs.BOM_UTF32_LE:
encoding = 'utf-32le'
elif signature == codecs.BOM_UTF32_BE:
encoding = 'utf-32be'
if encoding is None:
if signature[:3] == codecs.BOM_UTF8:
used = 3
encoding = 'utf-8'
elif signature[:2] == codecs.BOM_UTF16_LE:
used = 2
encoding = 'utf-16le'
elif signature[:2] == codecs.BOM_UTF16_BE:
used = 2
encoding = 'utf-16be'
else:
used = 0
if used >= 0:
stream.seek(used)
if encoding:
reader = codecs.getreader(encoding)
stream = reader(stream)
self.stream = stream
self.encoding = encoding
def read(self, size):
if (size == 0) or (self.encoding is None):
rv = self.stream.read(size)
else:
rv = u''
while size > 0:
rv += self.stream.read(1)
size -= 1
return rv
def close(self):
self.stream.close()
def readline(self):
if self.encoding is None:
line = ''
else:
line = u''
        while True:
            c = self.stream.read(1)
            if not c:
                # EOF: stop instead of looping forever when the stream does
                # not end with a newline.
                break
            line += c
            if c == '\n':
                break
        return line
class ConfigOutputStream(object):
"""
An output stream which can write either ANSI files with default encoding
or Unicode files with BOMs.
Handles UTF-8, UTF-16LE, UTF-16BE. Could handle UTF-32 if Python had
built-in support.
"""
def __init__(self, stream, encoding=None):
"""
Initialize an instance.
@param stream: The underlying stream to be written.
@type stream: A stream (file-like object).
@param encoding: The desired encoding.
@type encoding: str
"""
if encoding is not None:
encoding = str(encoding).lower()
self.encoding = encoding
if encoding == "utf-8":
stream.write(codecs.BOM_UTF8)
elif encoding == "utf-16be":
stream.write(codecs.BOM_UTF16_BE)
elif encoding == "utf-16le":
stream.write(codecs.BOM_UTF16_LE)
elif encoding == "utf-32be":
stream.write(codecs.BOM_UTF32_BE)
elif encoding == "utf-32le":
stream.write(codecs.BOM_UTF32_LE)
if encoding is not None:
writer = codecs.getwriter(encoding)
stream = writer(stream)
self.stream = stream
def write(self, data):
self.stream.write(data)
def flush(self):
self.stream.flush()
def close(self):
self.stream.close()
def defaultStreamOpener(name):
"""
This function returns a read-only stream, given its name. The name passed
in should correspond to an existing stream, otherwise an exception will be
raised.
This is the default value of L{streamOpener}; assign your own callable to
streamOpener to return streams based on names. For example, you could use
urllib2.urlopen().
@param name: The name of a stream, most commonly a file name.
@type name: str
@return: A stream with the specified name.
@rtype: A read-only stream (file-like object)
"""
return ConfigInputStream(file(name, 'rb'))
streamOpener = None
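# Illustrative sketch (hypothetical helper, not part of the original API): a
# stream opener that also accepts URLs, as suggested in the docstring of
# defaultStreamOpener. To use it, assign it to the module-level streamOpener:
#     import config; config.streamOpener = config.urlStreamOpener
def urlStreamOpener(name):
    """Open 'name' as a URL if it looks like one, otherwise as a local file."""
    if name.startswith('http://') or name.startswith('https://'):
        import urllib2
        from StringIO import StringIO
        # Buffer the response so ConfigInputStream can seek past any BOM.
        return ConfigInputStream(StringIO(urllib2.urlopen(name).read()))
    return defaultStreamOpener(name)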
class ConfigError(Exception):
"""
This is the base class of exceptions raised by this module.
"""
pass
class ConfigFormatError(ConfigError):
"""
This is the base class of exceptions raised due to syntax errors in
configurations.
"""
pass
class ConfigResolutionError(ConfigError):
"""
This is the base class of exceptions raised due to semantic errors in
configurations.
"""
pass
def isWord(s):
"""
See if a passed-in value is an identifier. If the value passed in is not a
string, False is returned. An identifier consists of alphanumerics or
underscore characters.
Examples::
        isWord('a word') -> False
        isWord('award') -> True
        isWord(9) -> False
        isWord('a_b_c_') -> True
@note: isWord('9abc') will return True - not exactly correct, but adequate
for the way it's used here.
@param s: The name to be tested
@type s: any
@return: True if a word, else False
@rtype: bool
"""
if type(s) != type(''):
return False
s = s.replace('_', '')
return s.isalnum()
def makePath(prefix, suffix):
"""
Make a path from a prefix and suffix.
Examples::
makePath('', 'suffix') -> 'suffix'
makePath('prefix', 'suffix') -> 'prefix.suffix'
makePath('prefix', '[1]') -> 'prefix[1]'
@param prefix: The prefix to use. If it evaluates as false, the suffix
is returned.
@type prefix: str
@param suffix: The suffix to use. It is either an identifier or an
index in brackets.
@type suffix: str
@return: The path concatenation of prefix and suffix, with a
dot if the suffix is not a bracketed index.
@rtype: str
"""
if not prefix:
rv = suffix
elif suffix[0] == '[':
rv = prefix + suffix
else:
rv = prefix + '.' + suffix
return rv
class Container(object):
"""
This internal class is the base class for mappings and sequences.
@ivar path: A string which describes how to get
to this instance from the root of the hierarchy.
Example::
a.list.of[1].or['more'].elements
"""
def __init__(self, parent):
"""
Initialize an instance.
@param parent: The parent of this instance in the hierarchy.
@type parent: A L{Container} instance.
"""
object.__setattr__(self, 'parent', parent)
def setPath(self, path):
"""
Set the path for this instance.
@param path: The path - a string which describes how to get
to this instance from the root of the hierarchy.
@type path: str
"""
object.__setattr__(self, 'path', path)
def evaluate(self, item):
"""
Evaluate items which are instances of L{Reference} or L{Expression}.
L{Reference} instances are evaluated using L{Reference.resolve},
and L{Expression} instances are evaluated using
L{Expression.evaluate}.
@param item: The item to be evaluated.
@type item: any
@return: If the item is an instance of L{Reference} or L{Expression},
the evaluated value is returned, otherwise the item is returned
unchanged.
"""
if isinstance(item, Reference):
item = item.resolve(self)
elif isinstance(item, Expression):
item = item.evaluate(self)
return item
def writeToStream(self, stream, indent, container):
"""
Write this instance to a stream at the specified indentation level.
Should be redefined in subclasses.
@param stream: The stream to write to
@type stream: A writable stream (file-like object)
@param indent: The indentation level
@type indent: int
@param container: The container of this instance
@type container: L{Container}
@raise NotImplementedError: If a subclass does not override this
"""
raise NotImplementedError
def writeValue(self, value, stream, indent):
if isinstance(self, Mapping):
indstr = ' '
else:
indstr = indent * ' '
if isinstance(value, Reference) or isinstance(value, Expression):
stream.write('%s%r%s' % (indstr, value, NEWLINE))
else:
if (type(value) is StringType): # and not isWord(value):
value = repr(value)
stream.write('%s%s%s' % (indstr, value, NEWLINE))
class Mapping(Container):
"""
This internal class implements key-value mappings in configurations.
"""
def __init__(self, parent=None):
"""
Initialize an instance.
@param parent: The parent of this instance in the hierarchy.
@type parent: A L{Container} instance.
"""
Container.__init__(self, parent)
object.__setattr__(self, 'path', '')
object.__setattr__(self, 'data', {})
object.__setattr__(self, 'order', []) # to preserve ordering
object.__setattr__(self, 'comments', {})
def __delitem__(self, key):
"""
Remove an item
"""
data = object.__getattribute__(self, 'data')
if key not in data:
raise AttributeError(key)
order = object.__getattribute__(self, 'order')
comments = object.__getattribute__(self, 'comments')
del data[key]
order.remove(key)
del comments[key]
def __getitem__(self, key):
data = object.__getattribute__(self, 'data')
if key not in data:
raise AttributeError(key)
rv = data[key]
return self.evaluate(rv)
__getattr__ = __getitem__
def __getattribute__(self, name):
if name == "__dict__":
return {}
if name in ["__methods__", "__members__"]:
return []
#if name == "__class__":
# return ''
data = object.__getattribute__(self, "data")
useData = data.has_key(name)
if useData:
rv = getattr(data, name)
else:
rv = object.__getattribute__(self, name)
if rv is None:
raise AttributeError(name)
return rv
def iteritems(self):
for key in self.keys():
yield(key, self[key])
raise StopIteration
def __contains__(self, item):
order = object.__getattribute__(self, 'order')
return item in order
def addMapping(self, key, value, comment, setting=False):
"""
Add a key-value mapping with a comment.
@param key: The key for the mapping.
@type key: str
@param value: The value for the mapping.
@type value: any
@param comment: The comment for the key (can be None).
@type comment: str
@param setting: If True, ignore clashes. This is set
to true when called from L{__setattr__}.
@raise ConfigFormatError: If an existing key is seen
again and setting is False.
"""
data = object.__getattribute__(self, 'data')
order = object.__getattribute__(self, 'order')
comments = object.__getattribute__(self, 'comments')
data[key] = value
if key not in order:
order.append(key)
elif not setting:
raise ConfigFormatError("repeated key: %s" % key)
comments[key] = comment
def __setattr__(self, name, value):
self.addMapping(name, value, None, True)
__setitem__ = __setattr__
def keys(self):
"""
Return the keys in a similar way to a dictionary.
"""
return object.__getattribute__(self, 'order')
def get(self, key, default=None):
"""
Allows a dictionary-style get operation.
"""
if key in self:
return self[key]
return default
def __str__(self):
return str(object.__getattribute__(self, 'data'))
def __repr__(self):
return repr(object.__getattribute__(self, 'data'))
def __len__(self):
return len(object.__getattribute__(self, 'order'))
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
order = object.__getattribute__(self, 'order')
return order.__iter__()
def writeToStream(self, stream, indent, container):
"""
Write this instance to a stream at the specified indentation level.
Should be redefined in subclasses.
@param stream: The stream to write to
@type stream: A writable stream (file-like object)
@param indent: The indentation level
@type indent: int
@param container: The container of this instance
@type container: L{Container}
"""
indstr = indent * ' '
if len(self) == 0:
stream.write(' { }%s' % NEWLINE)
else:
if isinstance(container, Mapping):
stream.write(NEWLINE)
stream.write('%s{%s' % (indstr, NEWLINE))
self.save(stream, indent + 1)
stream.write('%s}%s' % (indstr, NEWLINE))
def save(self, stream, indent=0):
"""
Save this configuration to the specified stream.
@param stream: A stream to which the configuration is written.
@type stream: A write-only stream (file-like object).
@param indent: The indentation level for the output.
@type indent: int
"""
indstr = indent * ' '
order = object.__getattribute__(self, 'order')
data = object.__getattribute__(self, 'data')
maxlen = 0 # max(map(lambda x: len(x), order))
for key in order:
comment = self.comments[key]
if isWord(key):
skey = key
else:
skey = repr(key)
if comment:
stream.write('%s#%s' % (indstr, comment))
stream.write('%s%-*s :' % (indstr, maxlen, skey))
value = data[key]
if isinstance(value, Container):
value.writeToStream(stream, indent, self)
else:
self.writeValue(value, stream, indent)
class Config(Mapping):
"""
This class represents a configuration, and is the only one which clients
need to interface to, under normal circumstances.
"""
class Namespace(object):
"""
This internal class is used for implementing default namespaces.
An instance acts as a namespace.
"""
def __init__(self):
self.sys = sys
self.os = os
def __repr__(self):
return "<Namespace('%s')>" % ','.join(self.__dict__.keys())
def __init__(self, streamOrFile=None, parent=None):
"""
Initializes an instance.
@param streamOrFile: If specified, causes this instance to be loaded
from the stream (by calling L{load}). If a string is provided, it is
passed to L{streamOpener} to open a stream. Otherwise, the passed
value is assumed to be a stream and used as is.
@type streamOrFile: A readable stream (file-like object) or a name.
@param parent: If specified, this becomes the parent of this instance
in the configuration hierarchy.
@type parent: a L{Container} instance.
"""
Mapping.__init__(self, parent)
object.__setattr__(self, 'reader', ConfigReader(self))
object.__setattr__(self, 'namespaces', [Config.Namespace()])
object.__setattr__(self, 'resolving', set())
if streamOrFile is not None:
if isinstance(streamOrFile, StringType) or isinstance(streamOrFile, UnicodeType):
global streamOpener
if streamOpener is None:
streamOpener = defaultStreamOpener
streamOrFile = streamOpener(streamOrFile)
load = object.__getattribute__(self, "load")
load(streamOrFile)
def load(self, stream):
"""
Load the configuration from the specified stream. Multiple streams can
be used to populate the same instance, as long as there are no
clashing keys. The stream is closed.
@param stream: A stream from which the configuration is read.
@type stream: A read-only stream (file-like object).
@raise ConfigError: if keys in the loaded configuration clash with
existing keys.
@raise ConfigFormatError: if there is a syntax error in the stream.
"""
reader = object.__getattribute__(self, 'reader')
#object.__setattr__(self, 'root', reader.load(stream))
reader.load(stream)
stream.close()
def addNamespace(self, ns, name=None):
"""
Add a namespace to this configuration which can be used to evaluate
(resolve) dotted-identifier expressions.
@param ns: The namespace to be added.
@type ns: A module or other namespace suitable for passing as an
argument to vars().
@param name: A name for the namespace, which, if specified, provides
an additional level of indirection.
@type name: str
"""
namespaces = object.__getattribute__(self, 'namespaces')
if name is None:
namespaces.append(ns)
else:
setattr(namespaces[0], name, ns)
def removeNamespace(self, ns, name=None):
"""
Remove a namespace added with L{addNamespace}.
@param ns: The namespace to be removed.
@param name: The name which was specified when L{addNamespace} was
called.
@type name: str
"""
namespaces = object.__getattribute__(self, 'namespaces')
if name is None:
namespaces.remove(ns)
else:
delattr(namespaces[0], name)
def save(self, stream, indent=0):
"""
Save this configuration to the specified stream. The stream is
closed if this is the top-level configuration in the hierarchy.
L{Mapping.save} is called to do all the work.
@param stream: A stream to which the configuration is written.
@type stream: A write-only stream (file-like object).
@param indent: The indentation level for the output.
@type indent: int
"""
Mapping.save(self, stream, indent)
if indent == 0:
stream.close()
def getByPath(self, path):
"""
Obtain a value in the configuration via its path.
@param path: The path of the required value
@type path: str
@return the value at the specified path.
@rtype: any
@raise ConfigError: If the path is invalid
"""
s = 'self.' + path
try:
return eval(s)
except Exception, e:
raise ConfigError(str(e))
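# Example of path-based access (illustrative only, reusing the 'simple.cfg'
# configuration shown in the module docstring):
#
#     cfg = Config(file('simple.cfg'))
#     cfg.getByPath('messages[0].message')   # -> 'Welcome'
#     cfg.getByPath('messages[2].name')      # -> 'Yves'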
class Sequence(Container):
"""
This internal class implements a value which is a sequence of other values.
"""
class SeqIter(object):
"""
This internal class implements an iterator for a L{Sequence} instance.
"""
def __init__(self, seq):
self.seq = seq
self.limit = len(object.__getattribute__(seq, 'data'))
self.index = 0
def __iter__(self):
return self
def next(self):
if self.index >= self.limit:
raise StopIteration
rv = self.seq[self.index]
self.index += 1
return rv
def __init__(self, parent=None):
"""
Initialize an instance.
@param parent: The parent of this instance in the hierarchy.
@type parent: A L{Container} instance.
"""
Container.__init__(self, parent)
object.__setattr__(self, 'data', [])
object.__setattr__(self, 'comments', [])
def append(self, item, comment):
"""
Add an item to the sequence.
@param item: The item to add.
@type item: any
@param comment: A comment for the item.
@type comment: str
"""
data = object.__getattribute__(self, 'data')
comments = object.__getattribute__(self, 'comments')
data.append(item)
comments.append(comment)
def __getitem__(self, index):
data = object.__getattribute__(self, 'data')
try:
rv = data[index]
except (IndexError, KeyError, TypeError):
raise ConfigResolutionError('%r is not a valid index for %r' % (index, object.__getattribute__(self, 'path')))
if not isinstance(rv, list):
rv = self.evaluate(rv)
else:
# deal with a slice
result = []
for a in rv:
result.append(self.evaluate(a))
rv = result
return rv
def __iter__(self):
return Sequence.SeqIter(self)
def __repr__(self):
return repr(object.__getattribute__(self, 'data'))
def __str__(self):
return str(self[:]) # using the slice evaluates the contents
def __len__(self):
return len(object.__getattribute__(self, 'data'))
def writeToStream(self, stream, indent, container):
"""
Write this instance to a stream at the specified indentation level.
Should be redefined in subclasses.
@param stream: The stream to write to
@type stream: A writable stream (file-like object)
@param indent: The indentation level
@type indent: int
@param container: The container of this instance
@type container: L{Container}
"""
indstr = indent * ' '
if len(self) == 0:
stream.write(' [ ]%s' % NEWLINE)
else:
if isinstance(container, Mapping):
stream.write(NEWLINE)
stream.write('%s[%s' % (indstr, NEWLINE))
self.save(stream, indent + 1)
stream.write('%s]%s' % (indstr, NEWLINE))
def save(self, stream, indent):
"""
Save this instance to the specified stream.
@param stream: A stream to which the configuration is written.
@type stream: A write-only stream (file-like object).
@param indent: The indentation level for the output, > 0
@type indent: int
"""
if indent == 0:
raise ConfigError("sequence cannot be saved as a top-level item")
data = object.__getattribute__(self, 'data')
comments = object.__getattribute__(self, 'comments')
indstr = indent * ' '
for i in xrange(0, len(data)):
value = data[i]
comment = comments[i]
if comment:
stream.write('%s#%s' % (indstr, comment))
if isinstance(value, Container):
value.writeToStream(stream, indent, self)
else:
self.writeValue(value, stream, indent)
class Reference(object):
"""
This internal class implements a value which is a reference to another value.
"""
def __init__(self, config, type, ident):
"""
Initialize an instance.
@param config: The configuration which contains this reference.
@type config: A L{Config} instance.
@param type: The type of reference.
@type type: BACKTICK or DOLLAR
@param ident: The identifier which starts the reference.
@type ident: str
"""
self.config = config
self.type = type
self.elements = [ident]
def addElement(self, type, ident):
"""
Add an element to the reference.
@param type: The type of reference.
@type type: BACKTICK or DOLLAR
@param ident: The identifier which continues the reference.
@type ident: str
"""
self.elements.append((type, ident))
def findConfig(self, container):
"""
Find the closest enclosing configuration to the specified container.
@param container: The container to start from.
@type container: L{Container}
@return: The closest enclosing configuration, or None.
@rtype: L{Config}
"""
while (container is not None) and not isinstance(container, Config):
container = object.__getattribute__(container, 'parent')
return container
def resolve(self, container):
"""
Resolve this instance in the context of a container.
@param container: The container to resolve from.
@type container: L{Container}
@return: The resolved value.
@rtype: any
@raise ConfigResolutionError: If resolution fails.
"""
rv = None
path = object.__getattribute__(container, 'path')
current = self.findConfig(container)
while current is not None:
if self.type == BACKTICK:
namespaces = object.__getattribute__(current, 'namespaces')
found = False
s = str(self)[1:-1]
for ns in namespaces:
try:
try:
rv = eval(s, vars(ns))
except TypeError: #Python 2.7 - vars is a dictproxy
rv = eval(s, {}, vars(ns))
found = True
break
except:
logger.debug("unable to resolve %r in %r", s, ns)
pass
if found:
break
else:
firstkey = self.elements[0]
if firstkey in current.resolving:
current.resolving.remove(firstkey)
raise ConfigResolutionError("Circular reference: %r" % firstkey)
current.resolving.add(firstkey)
key = firstkey
try:
rv = current[key]
for item in self.elements[1:]:
key = item[1]
rv = rv[key]
current.resolving.remove(firstkey)
break
except ConfigResolutionError:
raise
except:
logger.debug("Unable to resolve %r: %s", key, sys.exc_info()[1])
rv = None
pass
current.resolving.discard(firstkey)
current = self.findConfig(object.__getattribute__(current, 'parent'))
if current is None:
raise ConfigResolutionError("unable to evaluate %r in the configuration %s" % (self, path))
return rv
def __str__(self):
s = self.elements[0]
for tt, tv in self.elements[1:]:
if tt == DOT:
s += '.%s' % tv
else:
s += '[%r]' % tv
if self.type == BACKTICK:
return BACKTICK + s + BACKTICK
else:
return DOLLAR + s
def __repr__(self):
return self.__str__()
class Expression(object):
"""
This internal class implements a value which is obtained by evaluating an expression.
"""
def __init__(self, op, lhs, rhs):
"""
Initialize an instance.
@param op: the operation expressed in the expression.
@type op: PLUS, MINUS, STAR, SLASH, MOD
@param lhs: the left-hand-side operand of the expression.
@type lhs: any Expression or primary value.
@param rhs: the right-hand-side operand of the expression.
@type rhs: any Expression or primary value.
"""
self.op = op
self.lhs = lhs
self.rhs = rhs
def __str__(self):
return '%r %s %r' % (self.lhs, self.op, self.rhs)
def __repr__(self):
return self.__str__()
def evaluate(self, container):
"""
Evaluate this instance in the context of a container.
@param container: The container to evaluate in from.
@type container: L{Container}
@return: The evaluated value.
@rtype: any
@raise ConfigResolutionError: If evaluation fails.
        @raise ZeroDivisionError: If division by zero occurs.
@raise TypeError: If the operation is invalid, e.g.
subtracting one string from another.
"""
lhs = self.lhs
if isinstance(lhs, Reference):
lhs = lhs.resolve(container)
elif isinstance(lhs, Expression):
lhs = lhs.evaluate(container)
rhs = self.rhs
if isinstance(rhs, Reference):
rhs = rhs.resolve(container)
elif isinstance(rhs, Expression):
rhs = rhs.evaluate(container)
op = self.op
if op == PLUS:
rv = lhs + rhs
elif op == MINUS:
rv = lhs - rhs
elif op == STAR:
rv = lhs * rhs
elif op == SLASH:
rv = lhs / rhs
else:
rv = lhs % rhs
return rv
class ConfigReader(object):
"""
This internal class implements a parser for configurations.
"""
def __init__(self, config):
self.filename = None
self.config = config
self.lineno = 0
self.colno = 0
self.lastc = None
self.last_token = None
self.commentchars = '#'
self.whitespace = ' \t\r\n'
self.quotes = '\'"'
self.punct = ':-+*/%,.{}[]()@`$'
self.digits = '0123456789'
self.wordchars = '%s' % WORDCHARS # make a copy
self.identchars = self.wordchars + self.digits
self.pbchars = []
self.pbtokens = []
self.comment = None
def location(self):
"""
Return the current location (filename, line, column) in the stream
as a string.
        Used when printing error messages.
@return: A string representing a location in the stream being read.
@rtype: str
"""
return "%s(%d,%d)" % (self.filename, self.lineno, self.colno)
def getChar(self):
"""
Get the next char from the stream. Update line and column numbers
appropriately.
@return: The next character from the stream.
@rtype: str
"""
if self.pbchars:
c = self.pbchars.pop()
else:
c = self.stream.read(1)
self.colno += 1
if c == '\n':
self.lineno += 1
self.colno = 1
return c
def __repr__(self):
return "<ConfigReader at 0x%08x>" % id(self)
__str__ = __repr__
def getToken(self):
"""
Get a token from the stream. String values are returned in a form
where you need to eval() the returned value to get the actual
string. The return value is (token_type, token_value).
Multiline string tokenizing is thanks to David Janes (BlogMatrix)
@return: The next token.
@rtype: A token tuple.
"""
if self.pbtokens:
return self.pbtokens.pop()
stream = self.stream
self.comment = None
token = ''
tt = EOF
while True:
c = self.getChar()
if not c:
break
elif c == '#':
self.comment = stream.readline()
self.lineno += 1
continue
if c in self.quotes:
token = c
quote = c
tt = STRING
escaped = False
multiline = False
c1 = self.getChar()
if c1 == quote:
c2 = self.getChar()
if c2 == quote:
multiline = True
token += quote
token += quote
else:
self.pbchars.append(c2)
self.pbchars.append(c1)
else:
self.pbchars.append(c1)
while True:
c = self.getChar()
if not c:
break
token += c
if (c == quote) and not escaped:
if not multiline or (len(token) >= 6 and token.endswith(token[:3]) and token[-4] != '\\'):
break
if c == '\\':
escaped = not escaped
else:
escaped = False
if not c:
raise ConfigFormatError('%s: Unterminated quoted string: %r, %r' % (self.location(), token, c))
break
if c in self.whitespace:
self.lastc = c
continue
elif c in self.punct:
token = c
tt = c
if (self.lastc == ']') or (self.lastc in self.identchars):
if c == '[':
tt = LBRACK2
elif c == '(':
tt = LPAREN2
break
elif c in self.digits:
token = c
tt = NUMBER
in_exponent=False
while True:
c = self.getChar()
if not c:
break
if c in self.digits:
token += c
elif (c == '.') and token.find('.') < 0 and not in_exponent:
token += c
elif (c == '-') and token.find('-') < 0 and in_exponent:
token += c
elif (c in 'eE') and token.find('e') < 0 and\
token.find('E') < 0:
token += c
in_exponent = True
else:
if c and (c not in self.whitespace):
self.pbchars.append(c)
break
break
elif c in self.wordchars:
token = c
tt = WORD
c = self.getChar()
while c and (c in self.identchars):
token += c
c = self.getChar()
if c: # and c not in self.whitespace:
self.pbchars.append(c)
if token == "True":
tt = TRUE
elif token == "False":
tt = FALSE
elif token == "None":
tt = NONE
break
else:
raise ConfigFormatError('%s: Unexpected character: %r' % (self.location(), c))
if token:
self.lastc = token[-1]
else:
self.lastc = None
self.last_token = tt
return (tt, token)
def load(self, stream, parent=None, suffix=None):
"""
Load the configuration from the specified stream.
@param stream: A stream from which to load the configuration.
@type stream: A stream (file-like object).
@param parent: The parent of the configuration (to which this reader
belongs) in the hierarchy. Specified when the configuration is
included in another one.
@type parent: A L{Container} instance.
@param suffix: The suffix of this configuration in the parent
configuration. Should be specified whenever the parent is not None.
@raise ConfigError: If parent is specified but suffix is not.
@raise ConfigFormatError: If there are syntax errors in the stream.
"""
if parent is not None:
if suffix is None:
raise ConfigError("internal error: load called with parent but no suffix")
self.config.setPath(makePath(object.__getattribute__(parent, 'path'), suffix))
self.setStream(stream)
self.token = self.getToken()
self.parseMappingBody(self.config)
if self.token[0] != EOF:
raise ConfigFormatError('%s: expecting EOF, found %r' % (self.location(), self.token[1]))
def setStream(self, stream):
"""
Set the stream to the specified value, and prepare to read from it.
@param stream: A stream from which to load the configuration.
@type stream: A stream (file-like object).
"""
self.stream = stream
if hasattr(stream, 'name'):
filename = stream.name
else:
filename = '?'
self.filename = filename
self.lineno = 1
self.colno = 1
def match(self, t):
"""
Ensure that the current token type matches the specified value, and
advance to the next token.
@param t: The token type to match.
@type t: A valid token type.
@return: The token which was last read from the stream before this
function is called.
@rtype: a token tuple - see L{getToken}.
@raise ConfigFormatError: If the token does not match what's expected.
"""
if self.token[0] != t:
raise ConfigFormatError("%s: expecting %s, found %r" % (self.location(), t, self.token[1]))
rv = self.token
self.token = self.getToken()
return rv
def parseMappingBody(self, parent):
"""
Parse the internals of a mapping, and add entries to the provided
L{Mapping}.
@param parent: The mapping to add entries to.
@type parent: A L{Mapping} instance.
"""
while self.token[0] in [WORD, STRING]:
self.parseKeyValuePair(parent)
def parseKeyValuePair(self, parent):
"""
Parse a key-value pair, and add it to the provided L{Mapping}.
@param parent: The mapping to add entries to.
@type parent: A L{Mapping} instance.
@raise ConfigFormatError: if a syntax error is found.
"""
comment = self.comment
tt, tv = self.token
if tt == WORD:
key = tv
suffix = tv
elif tt == STRING:
key = eval(tv)
suffix = '[%s]' % tv
else:
msg = "%s: expecting word or string, found %r"
raise ConfigFormatError(msg % (self.location(), tv))
self.token = self.getToken()
# for now, we allow key on its own as a short form of key : True
if self.token[0] == COLON:
self.token = self.getToken()
value = self.parseValue(parent, suffix)
else:
value = True
try:
parent.addMapping(key, value, comment)
except Exception, e:
raise ConfigFormatError("%s: %s, %r" % (self.location(), e,
self.token[1]))
tt = self.token[0]
if tt not in [EOF, WORD, STRING, RCURLY, COMMA]:
            msg = "%s: expecting one of EOF, WORD, STRING, RCURLY, COMMA, found %r"
raise ConfigFormatError(msg % (self.location(), self.token[1]))
if tt == COMMA:
self.token = self.getToken()
def parseValue(self, parent, suffix):
"""
Parse a value.
@param parent: The container to which the value will be added.
@type parent: A L{Container} instance.
@param suffix: The suffix for the value.
@type suffix: str
@return: The value
@rtype: any
@raise ConfigFormatError: if a syntax error is found.
"""
tt = self.token[0]
if tt in [STRING, WORD, NUMBER, LPAREN, DOLLAR,
TRUE, FALSE, NONE, BACKTICK, MINUS]:
rv = self.parseScalar()
elif tt == LBRACK:
rv = self.parseSequence(parent, suffix)
elif tt in [LCURLY, AT]:
rv = self.parseMapping(parent, suffix)
else:
raise ConfigFormatError("%s: unexpected input: %r" %
(self.location(), self.token[1]))
return rv
def parseSequence(self, parent, suffix):
"""
Parse a sequence.
@param parent: The container to which the sequence will be added.
@type parent: A L{Container} instance.
@param suffix: The suffix for the value.
@type suffix: str
@return: a L{Sequence} instance representing the sequence.
@rtype: L{Sequence}
@raise ConfigFormatError: if a syntax error is found.
"""
rv = Sequence(parent)
rv.setPath(makePath(object.__getattribute__(parent, 'path'), suffix))
self.match(LBRACK)
comment = self.comment
tt = self.token[0]
while tt in [STRING, WORD, NUMBER, LCURLY, LBRACK, LPAREN, DOLLAR,
TRUE, FALSE, NONE, BACKTICK, MINUS]:
suffix = '[%d]' % len(rv)
value = self.parseValue(parent, suffix)
rv.append(value, comment)
tt = self.token[0]
comment = self.comment
if tt == COMMA:
self.match(COMMA)
tt = self.token[0]
comment = self.comment
continue
self.match(RBRACK)
return rv
def parseMapping(self, parent, suffix):
"""
Parse a mapping.
@param parent: The container to which the mapping will be added.
@type parent: A L{Container} instance.
@param suffix: The suffix for the value.
@type suffix: str
@return: a L{Mapping} instance representing the mapping.
@rtype: L{Mapping}
@raise ConfigFormatError: if a syntax error is found.
"""
if self.token[0] == LCURLY:
self.match(LCURLY)
rv = Mapping(parent)
rv.setPath(
makePath(object.__getattribute__(parent, 'path'), suffix))
self.parseMappingBody(rv)
self.match(RCURLY)
else:
self.match(AT)
tt, fn = self.match(STRING)
rv = Config(eval(fn), parent)
return rv
def parseScalar(self):
"""
Parse a scalar - a terminal value such as a string or number, or
an L{Expression} or L{Reference}.
@return: the parsed scalar
@rtype: any scalar
@raise ConfigFormatError: if a syntax error is found.
"""
lhs = self.parseTerm()
tt = self.token[0]
while tt in [PLUS, MINUS]:
self.match(tt)
rhs = self.parseTerm()
lhs = Expression(tt, lhs, rhs)
tt = self.token[0]
return lhs
def parseTerm(self):
"""
Parse a term in an additive expression (a + b, a - b)
@return: the parsed term
@rtype: any scalar
@raise ConfigFormatError: if a syntax error is found.
"""
lhs = self.parseFactor()
tt = self.token[0]
while tt in [STAR, SLASH, MOD]:
self.match(tt)
rhs = self.parseFactor()
lhs = Expression(tt, lhs, rhs)
tt = self.token[0]
return lhs
def parseFactor(self):
"""
        Parse a factor in a multiplicative expression (a * b, a / b, a % b)
@return: the parsed factor
@rtype: any scalar
@raise ConfigFormatError: if a syntax error is found.
"""
tt = self.token[0]
if tt in [NUMBER, WORD, STRING, TRUE, FALSE, NONE]:
rv = self.token[1]
if tt != WORD:
rv = eval(rv)
self.match(tt)
elif tt == LPAREN:
self.match(LPAREN)
rv = self.parseScalar()
self.match(RPAREN)
elif tt == DOLLAR:
self.match(DOLLAR)
rv = self.parseReference(DOLLAR)
elif tt == BACKTICK:
self.match(BACKTICK)
rv = self.parseReference(BACKTICK)
self.match(BACKTICK)
elif tt == MINUS:
self.match(MINUS)
rv = -self.parseScalar()
else:
raise ConfigFormatError("%s: unexpected input: %r" %
(self.location(), self.token[1]))
return rv
def parseReference(self, type):
"""
Parse a reference.
@return: the parsed reference
@rtype: L{Reference}
@raise ConfigFormatError: if a syntax error is found.
"""
word = self.match(WORD)
rv = Reference(self.config, type, word[1])
while self.token[0] in [DOT, LBRACK2]:
self.parseSuffix(rv)
return rv
def parseSuffix(self, ref):
"""
Parse a reference suffix.
@param ref: The reference of which this suffix is a part.
@type ref: L{Reference}.
@raise ConfigFormatError: if a syntax error is found.
"""
tt = self.token[0]
if tt == DOT:
self.match(DOT)
word = self.match(WORD)
ref.addElement(DOT, word[1])
else:
self.match(LBRACK2)
tt, tv = self.token
if tt not in [NUMBER, STRING]:
raise ConfigFormatError("%s: expected number or string, found %r" % (self.location(), tv))
self.token = self.getToken()
tv = eval(tv)
self.match(RBRACK)
ref.addElement(LBRACK, tv)
def defaultMergeResolve(map1, map2, key):
"""
A default resolver for merge conflicts. Returns a string
indicating what action to take to resolve the conflict.
@param map1: The map being merged into.
@type map1: L{Mapping}.
@param map2: The map being used as the merge operand.
@type map2: L{Mapping}.
@param key: The key in map2 (which also exists in map1).
@type key: str
@return: One of "merge", "append", "mismatch" or "overwrite"
indicating what action should be taken. This should
be appropriate to the objects being merged - e.g.
there is no point returning "merge" if the two objects
are instances of L{Sequence}.
@rtype: str
"""
obj1 = map1[key]
obj2 = map2[key]
if isinstance(obj1, Mapping) and isinstance(obj2, Mapping):
rv = "merge"
elif isinstance(obj1, Sequence) and isinstance(obj2, Sequence):
rv = "append"
else:
rv = "mismatch"
return rv
def overwriteMergeResolve(map1, map2, key):
"""
An overwriting resolver for merge conflicts. Calls L{defaultMergeResolve},
but where a "mismatch" is detected, returns "overwrite" instead.
@param map1: The map being merged into.
@type map1: L{Mapping}.
@param map2: The map being used as the merge operand.
@type map2: L{Mapping}.
@param key: The key in map2 (which also exists in map1).
@type key: str
"""
rv = defaultMergeResolve(map1, map2, key)
if rv == "mismatch":
rv = "overwrite"
return rv
class ConfigMerger(object):
"""
This class is used for merging two configurations. If a key exists in the
merge operand but not the merge target, then the entry is copied from the
merge operand to the merge target. If a key exists in both configurations,
then a resolver (a callable) is called to decide how to handle the
conflict.
"""
def __init__(self, resolver=defaultMergeResolve):
"""
Initialise an instance.
        @param resolver: The callable used to decide how merge conflicts are resolved.
@type resolver: A callable which takes the argument list
(map1, map2, key) where map1 is the mapping being merged into,
map2 is the merge operand and key is the clashing key. The callable
should return a string indicating how the conflict should be resolved.
For possible return values, see L{defaultMergeResolve}. The default
value preserves the old behaviour
"""
self.resolver = resolver
def merge(self, merged, mergee):
"""
Merge two configurations. The second configuration is unchanged,
and the first is changed to reflect the results of the merge.
@param merged: The configuration to merge into.
@type merged: L{Config}.
@param mergee: The configuration to merge.
@type mergee: L{Config}.
"""
self.mergeMapping(merged, mergee)
def mergeMapping(self, map1, map2):
"""
Merge two mappings recursively. The second mapping is unchanged,
and the first is changed to reflect the results of the merge.
@param map1: The mapping to merge into.
@type map1: L{Mapping}.
@param map2: The mapping to merge.
@type map2: L{Mapping}.
"""
keys = map1.keys()
for key in map2.keys():
if key not in keys:
map1[key] = map2[key]
else:
obj1 = map1[key]
obj2 = map2[key]
decision = self.resolver(map1, map2, key)
if decision == "merge":
self.mergeMapping(obj1, obj2)
elif decision == "append":
self.mergeSequence(obj1, obj2)
elif decision == "overwrite":
map1[key] = obj2
elif decision == "mismatch":
self.handleMismatch(obj1, obj2)
else:
msg = "unable to merge: don't know how to implement %r"
raise ValueError(msg % decision)
def mergeSequence(self, seq1, seq2):
"""
Merge two sequences. The second sequence is unchanged,
and the first is changed to have the elements of the second
appended to it.
@param seq1: The sequence to merge into.
@type seq1: L{Sequence}.
@param seq2: The sequence to merge.
@type seq2: L{Sequence}.
"""
data1 = object.__getattribute__(seq1, 'data')
data2 = object.__getattribute__(seq2, 'data')
for obj in data2:
data1.append(obj)
comment1 = object.__getattribute__(seq1, 'comments')
comment2 = object.__getattribute__(seq2, 'comments')
for obj in comment2:
comment1.append(obj)
def handleMismatch(self, obj1, obj2):
"""
Handle a mismatch between two objects.
@param obj1: The object to merge into.
@type obj1: any
@param obj2: The object to merge.
@type obj2: any
"""
raise ConfigError("unable to merge %r with %r" % (obj1, obj2))
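# Example of merging two configurations (illustrative only; the file names are
# hypothetical). Clashing mappings are merged recursively, clashing sequences
# are appended, and with overwriteMergeResolve clashing scalars are overwritten:
#
#     base = Config(file('base.cfg'))
#     overrides = Config(file('local.cfg'))
#     ConfigMerger(overwriteMergeResolve).merge(base, overrides)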
class ConfigList(list):
"""
This class implements an ordered list of configurations and allows you
to try getting the configuration from each entry in turn, returning
the first successfully obtained value.
"""
def getByPath(self, path):
"""
Obtain a value from the first configuration in the list which defines
it.
@param path: The path of the value to retrieve.
@type path: str
@return: The value from the earliest configuration in the list which
defines it.
@rtype: any
@raise ConfigError: If no configuration in the list has an entry with
the specified path.
"""
found = False
rv = None
for entry in self:
try:
rv = entry.getByPath(path)
found = True
break
except ConfigError:
pass
if not found:
raise ConfigError("unable to resolve %r" % path)
return rv
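# Example (illustrative only; the file names are hypothetical): consult several
# configurations in priority order, returning the first value found:
#
#     configs = ConfigList([Config(file('user.cfg')), Config(file('defaults.cfg'))])
#     level = configs.getByPath('logging.level')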
| apache-2.0 |
dkodnik/arp | addons/sale_analytic_plans/__init__.py | 443 | 1208 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import sale_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
brandond/ansible | lib/ansible/modules/network/aci/aci_tenant_span_src_group_to_dst_group.py | 19 | 7078 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_src_group_to_dst_group
short_description: Bind SPAN source groups to destination groups (span:SpanLbl)
description:
- Bind SPAN source groups to associated destination groups on Cisco ACI fabrics.
version_added: '2.4'
options:
description:
description:
- The description for Span source group to destination group binding.
type: str
aliases: [ descr ]
dst_group:
description:
- The Span destination group to associate with the source group.
type: str
src_group:
description:
- The name of the Span source group.
type: str
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
type: str
aliases: [ tenant_name ]
extends_documentation_fragment: aci
notes:
- The C(tenant), C(src_group), and C(dst_group) must exist before using this module in your playbook.
The M(aci_tenant), M(aci_tenant_span_src_group), and M(aci_tenant_span_dst_group) modules can be used for this.
seealso:
- module: aci_tenant
- module: aci_tenant_span_src_group
- module: aci_tenant_span_dst_group
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(span:SrcGrp).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- aci_tenant_span_src_group_to_dst_group:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
src_group: "{{ src_group }}"
dst_group: "{{ dst_group }}"
description: "{{ description }}"
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
dst_group=dict(type='str'), # Not required for querying all objects
src_group=dict(type='str'), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dst_group', 'src_group', 'tenant']],
['state', 'present', ['dst_group', 'src_group', 'tenant']],
],
)
description = module.params['description']
dst_group = module.params['dst_group']
src_group = module.params['src_group']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
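    # construct_url() below composes the object DN from the three nested
    # levels, i.e. uni/tn-{tenant}/srcgrp-{src_group}/spanlbl-{dst_group}.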
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='spanSrcGrp',
aci_rn='srcgrp-{0}'.format(src_group),
module_object=src_group,
target_filter={'name': src_group},
),
subclass_2=dict(
aci_class='spanSpanLbl',
aci_rn='spanlbl-{0}'.format(dst_group),
module_object=dst_group,
target_filter={'name': dst_group},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='spanSpanLbl',
class_config=dict(
descr=description,
name=dst_group,
),
)
aci.get_diff(aci_class='spanSpanLbl')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
sdlBasic/sdlbrt | win32/mingw/opt/lib/python2.7/toaiff.py | 293 | 3142 | """Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format).
Input may be compressed.
Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
An exception is raised if the file is not of a recognized type.
Returned filename is either the input filename or a temporary filename;
in the latter case the caller must ensure that it is removed.
Other temporary files used are removed by the function.
"""
from warnings import warnpy3k
warnpy3k("the toaiff module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import os
import tempfile
import pipes
import sndhdr
__all__ = ["error", "toaiff"]
table = {}
t = pipes.Template()
t.append('sox -t au - -t aiff -r 8000 -', '--')
table['au'] = t
# XXX The following is actually sub-optimal.
# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
# XXX We must force the output sampling rate else the SGI won't play
# XXX files sampled at 5.5k or 7.333k; however this means that files
# XXX sampled at 11k are unnecessarily expanded.
# XXX Similar comments apply to some other file types.
t = pipes.Template()
t.append('sox -t hcom - -t aiff -r 22050 -', '--')
table['hcom'] = t
t = pipes.Template()
t.append('sox -t voc - -t aiff -r 11025 -', '--')
table['voc'] = t
t = pipes.Template()
t.append('sox -t wav - -t aiff -', '--')
table['wav'] = t
t = pipes.Template()
t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
table['8svx'] = t
t = pipes.Template()
t.append('sox -t sndt - -t aiff -r 16000 -', '--')
table['sndt'] = t
t = pipes.Template()
t.append('sox -t sndr - -t aiff -r 16000 -', '--')
table['sndr'] = t
uncompress = pipes.Template()
uncompress.append('uncompress', '--')
class error(Exception):
pass
def toaiff(filename):
temps = []
ret = None
try:
ret = _toaiff(filename, temps)
finally:
for temp in temps[:]:
if temp != ret:
try:
os.unlink(temp)
except os.error:
pass
temps.remove(temp)
return ret
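# Example usage (illustrative sketch; 'sample.wav' and play() are hypothetical):
#
#     import os, toaiff
#     aiff = toaiff.toaiff('sample.wav')
#     try:
#         play(aiff)
#     finally:
#         if aiff != 'sample.wav':
#             os.unlink(aiff)    # the caller must remove the temporary file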
def _toaiff(filename, temps):
if filename[-2:] == '.Z':
(fd, fname) = tempfile.mkstemp()
os.close(fd)
temps.append(fname)
sts = uncompress.copy(filename, fname)
if sts:
raise error, filename + ': uncompress failed'
else:
fname = filename
try:
ftype = sndhdr.whathdr(fname)
if ftype:
ftype = ftype[0] # All we're interested in
except IOError, msg:
if type(msg) == type(()) and len(msg) == 2 and \
type(msg[0]) == type(0) and type(msg[1]) == type(''):
msg = msg[1]
if type(msg) != type(''):
msg = repr(msg)
raise error, filename + ': ' + msg
if ftype == 'aiff':
return fname
if ftype is None or not ftype in table:
raise error, '%s: unsupported audio file type %r' % (filename, ftype)
(fd, temp) = tempfile.mkstemp()
os.close(fd)
temps.append(temp)
sts = table[ftype].copy(fname, temp)
if sts:
raise error, filename + ': conversion to aiff failed'
return temp
| lgpl-2.1 |
jakebailey/PrairieLearn | tools/generate_text.py | 4 | 6445 | #!/usr/bin/env python
import os, fnmatch, sys, re, hashlib, subprocess, platform, glob
CONVERT_CMD = "convert"
if platform.system() == "Windows":
    globspec = r"C:\Program Files\ImageMagick*\convert.exe"  # raw string so backslashes are literal
magicks = glob.glob(globspec)
if len(magicks) < 1:
print("ERROR: No files match %s" % globspec)
sys.exit(1)
if len(magicks) > 1:
print("ERROR: Multiple files match %s" % globspec)
for m in magicks:
print(m)
sys.exit(1)
CONVERT_CMD = magicks[0]
print("Convert command: %s" % CONVERT_CMD)
# find strings that look like "TEX:abc" or 'TEX:abc' (note different quote types
# use <quote> to store the type of quote
# use the negative-lookahead regex ((?!(?P=quote)).) to match non-quote characters
TEXT_RE = re.compile("(?P<quote>['\"])TEX:(((?!(?P=quote)).)+)(?P=quote)")
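# For example (illustrative), a line containing:  label = "TEX:$x^2 + y^2$";
# matches with group(2) == "$x^2 + y^2$", the LaTeX snippet that gets rendered.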
# filename regexp for generated files
FILENAME_RE = re.compile("[0-9a-fA-F]{40}\\..{3}")
if len(sys.argv) <= 2:
print("Usage: generate_text <outputdir> <basedir1> <basedir2> ...")
print("or: generate_text --subdir <basedir1> <basedir2> ...")
sys.exit(0)
if sys.argv[1] == "--subdir":
MODE = "subdir"
print("Output directory: 'text/' within each subdirectory")
else:
MODE = "textdir"
TEXT_DIR = sys.argv[1]
print("Output directory: %s" % TEXT_DIR)
def output_dir(filename):
if MODE == "textdir":
return TEXT_DIR
else:
return os.path.join(os.path.dirname(filename), "text")
def ensure_dir_exists(d):
if not os.path.isdir(d):
os.mkdir(d)
escape_seqs = {
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
"'": "'",
'"': '"',
"\\": "\\",
}
def unescape(s):
chars = []
i = 0
while i < len(s):
if s[i] != "\\":
chars.append(s[i])
else:
if i == len(s) - 1:
break
i += 1
if s[i] in escape_seqs:
chars.append(escape_seqs[s[i]])
i += 1
return "".join(chars)
def process_file(filename):
print(filename)
img_filenames = []
with open(filename) as file:
for line in file:
for match in TEXT_RE.finditer(line):
match_text = match.group(2)
text = unescape(match_text)
hash = hashlib.sha1(text.encode()).hexdigest()
print(hash + " " + text)
tex_filename = hash + ".tex"
pdf_filename = hash + ".pdf"
img_filename = hash + ".png"
outdir = output_dir(filename)
ensure_dir_exists(outdir)
tex_full_filename = os.path.join(outdir, tex_filename)
img_full_filename = os.path.join(outdir, img_filename)
if not os.path.exists(img_full_filename):
print("Writing tex file " + tex_full_filename)
with open(tex_full_filename, "w") as texfile:
texfile.write("\\documentclass[12pt]{article}\n")
texfile.write("\\usepackage{amsmath,amsthm,amssymb}\n")
texfile.write("\\begin{document}\n")
texfile.write("\\thispagestyle{empty}\n")
texfile.write(text + "\n")
texfile.write("\\end{document}\n")
print("Running pdflatex on " + tex_filename)
subprocess.check_call(["pdflatex", tex_filename], cwd=outdir)
print("Running convert on " + pdf_filename)
subprocess.check_call([CONVERT_CMD, "-density", "96",
pdf_filename, "-trim", "+repage",
img_filename], cwd=outdir)
img_filenames.append(img_filename)
img_hi_filename = hash + "_hi.png"
img_hi_full_filename = os.path.join(outdir, img_hi_filename)
if not os.path.exists(img_hi_full_filename):
print("Writing tex file " + tex_full_filename)
with open(tex_full_filename, "w") as texfile:
texfile.write("\\documentclass[12pt]{article}\n")
texfile.write("\\usepackage{amsmath,amsthm,amssymb}\n")
texfile.write("\\begin{document}\n")
texfile.write("\\thispagestyle{empty}\n")
texfile.write(text + "\n")
texfile.write("\\end{document}\n")
print("Running pdflatex on " + tex_filename)
subprocess.check_call(["pdflatex", tex_filename], cwd=outdir)
print("Running convert on " + pdf_filename)
subprocess.check_call([CONVERT_CMD, "-density", "600",
pdf_filename, "-trim", "+repage",
img_hi_filename], cwd=outdir)
img_filenames.append(img_hi_filename)
return img_filenames
def delete_non_matching(basedir, nondelete_filenames):
if not os.path.exists(basedir) or not os.path.isdir(basedir):
return
filenames = os.listdir(basedir)
for filename in filenames:
if filename not in nondelete_filenames:
if FILENAME_RE.match(filename):
full_filename = os.path.join(basedir, filename)
print("deleting " + full_filename)
os.unlink(full_filename)
if MODE == "subdir":
for basedir in sys.argv[2:]:
print("########################################")
print("Processing %s" % basedir)
for (dirpath, dirnames, filenames) in os.walk(basedir):
img_filenames = []
for filename in fnmatch.filter(filenames, "*.js"):
img_filenames += process_file(os.path.join(dirpath, filename))
text_dir = os.path.join(dirpath, "text")
delete_non_matching(text_dir, img_filenames)
else:
img_filenames = []
for basedir in sys.argv[2:]:
print("########################################")
print("Processing %s" % basedir)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for filename in fnmatch.filter(filenames, "*.js"):
img_filenames += process_file(os.path.join(dirpath, filename))
delete_non_matching(TEXT_DIR, img_filenames)
| agpl-3.0 |
f3r/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
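# Note: fit_inverse_transform=True above is what enables kpca.inverse_transform;
# it fits the mapping used to reconstruct X_back in the original space.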
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/io.py | 51 | 64383 | """
The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is a in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# This is a prototype; hopefully eventually some of this will be
# reimplemented in C.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
from __future__ import print_function
from __future__ import unicode_literals
__author__ = ("Guido van Rossum <[email protected]>, "
"Mike Verdone <[email protected]>, "
"Mark Russell <[email protected]>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper"]
import os
import abc
import codecs
import _fileio
import threading
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# py3k has only new style classes
__metaclass__ = type
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
IOError.__init__(self, errno, strerror)
self.characters_written = characters_written
def open(file, mode="r", buffering=None, encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. If the file cannot be opened, an IOError is
raised.
file is either a string giving the name (and the path if the file
isn't in the current working directory) of the file to be opened or an
integer file descriptor of the file to be wrapped. (If a file
descriptor is given, it is closed when the returned I/O object is
closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy. By
default full buffering is on. Pass 0 to switch buffering off (only
allowed in binary mode), 1 to set line buffering, and an integer > 1
for full buffering.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (basestring, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, basestring):
raise TypeError("invalid mode: %r" % mode)
if buffering is not None and not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, basestring):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, basestring):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
if buffering is None:
buffering = -1
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
class _DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=None, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dumbdbm does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = _DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(object):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
    may raise an IOError when operations they do not support are called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
        fp.write('Spam and eggs!')
"""
__metaclass__ = abc.ABCMeta
### Internal ###
def _unsupported(self, name):
"""Internal: raise an exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence = 0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return current stream position."""
return self.seek(0, 1)
def truncate(self, pos = None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
except IOError:
pass # If flush() fails, just give up
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise an IOError if file is not seekable
"""
if not self.seekable():
raise IOError("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise an IOError if file is not readable
"""
if not self.readable():
raise IOError("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise an IOError if file is not writable
"""
if not self.writable():
raise IOError("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self):
"""Context management protocol. Returns self."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit = -1):
r"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
self._checkClosed()
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
if not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None:
hint = -1
if not isinstance(hint, (int, long)):
raise TypeError("hint must be an integer")
if hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n = -1):
"""Read and return up to n bytes.
Returns an empty bytes array on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
return bytes(res)
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
        is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
class FileIO(_fileio._FileIO, RawIOBase):
"""Raw I/O implementation for OS files."""
# This multiply inherits from _FileIO and RawIOBase to make
# isinstance(io.FileIO(), io.RawIOBase) return True without requiring
# that _fileio._FileIO inherits from io.RawIOBase (which would be hard
# to do since _fileio.c is written in C).
def __init__(self, name, mode="r", closefd=True):
_fileio._FileIO.__init__(self, name, mode, closefd)
self._name = name
def close(self):
_fileio._FileIO.close(self)
RawIOBase.close(self)
@property
def name(self):
return self._name
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n = None):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def readinto(self, b):
"""Read up to len(b) bytes into b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array(b'b', data)
return n
def write(self, b):
"""Write the given buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self.raw = raw
### Positioning ###
def seek(self, pos, whence=0):
return self.raw.seek(pos, whence)
def tell(self):
return self.raw.tell()
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
self.raw.flush()
def close(self):
if not self.closed:
try:
self.flush()
except IOError:
pass # If flush() fails, just give up
self.raw.close()
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class _BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
# XXX More docs
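    # Rough usage sketch (values shown are illustrative):
    #
    #     b = BytesIO(b"abc")
    #     b.read(2)          # -> b"ab"
    #     b.seek(0, 2)       # move to end of buffer
    #     b.write(b"def")    # -> 3; buffer is now b"abcdef"
    #     b.getvalue()       # -> b"abcdef"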
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += bytearray(initial_bytes)
self._buffer = buf
self._pos = 0
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("argument must be an integer")
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""this is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos = pos.__index__()
except AttributeError as err:
raise TypeError("an integer is required") # from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
elif pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return self.seek(pos)
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
# Use the faster implementation of BytesIO if available
try:
import _bytesio
class BytesIO(_bytesio._BytesIO, BufferedIOBase):
__doc__ = _bytesio._BytesIO.__doc__
except ImportError:
BytesIO = _BytesIO
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
raw._checkReadable()
_BufferedIOMixin.__init__(self, raw)
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = threading.Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
            # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n <= 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return self.raw.tell() - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = self.raw.seek(pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE. If max_buffer_size is omitted, it defaults to
twice the buffer size.
"""
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkWritable()
_BufferedIOMixin.__init__(self, raw)
self.buffer_size = buffer_size
self.max_buffer_size = (2*buffer_size
if max_buffer_size is None
else max_buffer_size)
self._write_buf = bytearray()
self._write_lock = threading.Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.max_buffer_size:
# We've hit max_buffer_size. We have to accept a
# partial write and cut back our buffer.
overage = len(self._write_buf) - self.max_buffer_size
self._write_buf = self._write_buf[:self.max_buffer_size]
raise BlockingIOError(e.errno, e.strerror, overage)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
n = self.raw.write(self._write_buf)
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return self.raw.tell() + len(self._write_buf)
def seek(self, pos, whence=0):
with self._write_lock:
self._flush_unlocked()
return self.raw.seek(pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered writer)
defaults to twice the buffer size.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
reader._checkReadable()
writer._checkWritable()
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size, max_buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered
writer) defaults to twice the buffer size.
"""
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
self.flush()
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
return pos
def tell(self):
if self._write_buf:
return self.raw.tell() + len(self._write_buf)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
self.seek(pos)
return BufferedWriter.truncate(self)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n = -1):
"""Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream."""
self._unsupported("write")
def truncate(self, pos = None):
"""Truncate size to pos."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
"""Codec used when reading a file in universal newlines mode.
It wraps another incremental decoder, translating \\r\\n and \\r into \\n.
It also records the types of newlines encountered.
When used with translate=False, it ensures that the newline sequence is
returned in one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 128
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False):
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, basestring):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, basestring):
raise ValueError("invalid errors: %r" % errors)
self.buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
try:
self.flush()
except:
pass # If flush() fails, just give up
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, unicode):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
The return value is True unless EOF was reached. The decoded string
is placed in self._decoded_chars (replacing its previous value).
The entire input chunk is sent to the decoder, though some of it
may remain buffered in the decoder, yet to be converted.
"""
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
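    # Cookie layout sketch: five non-negative integers packed into one long
    # integer in 64-bit fields, so for a stateless decoder at a clean
    # position _pack_cookie(pos) is simply pos and _unpack_cookie(pos)
    # returns (pos, 0, 0, 0, 0).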
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
def tell(self):
if not self._seekable:
raise IOError("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
for next_byte in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
self.seek(pos)
return self.buffer.truncate()
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise IOError("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise IOError("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise IOError("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
return cookie
def read(self, n=None):
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def next(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
if not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
decoder = self._decoder or self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
more_line = ''
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""An in-memory stream for text. The initial_value argument sets the
value of object. The other arguments are like those of TextIOWrapper's
constructor.
"""
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
if initial_value:
if not isinstance(initial_value, unicode):
initial_value = unicode(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
| apache-2.0 |
hlieberman/debian-ansible | lib/ansible/module_utils/known_hosts.py | 8 | 5832 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
from hashlib import sha1
HASHED_KEY_MAGIC = "|1|"
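# A hashed known_hosts entry looks roughly like
#   |1|<base64 salt>|<base64 HMAC-SHA1 of hostname> ssh-rsa AAAA...
# not_in_host_file() below recomputes the HMAC with the stored salt to
# decide whether a line matches the host being checked.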
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
fqdn = get_fqdn(module.params['repo'])
if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
def get_fqdn(repo_url):
""" chop the hostname out of a giturl """
result = None
if "@" in repo_url and not repo_url.startswith("http"):
repo_url = repo_url.split("@", 1)[1]
if ":" in repo_url:
repo_url = repo_url.split(":")[0]
result = repo_url
elif "/" in repo_url:
repo_url = repo_url.split("/")[0]
result = repo_url
return result
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError, e:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
result = False
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, 0700)
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
module.append_to_file(user_host_file, out)
return rc, out, err
| gpl-3.0 |
cliqz/socorro | alembic/versions/c1ac31c8fea_fix_bug_972612_is_gc_count_should_be_.py | 14 | 1359 | """fix bug 972612 - is_gc_count should be per-ADU
Revision ID: c1ac31c8fea
Revises: 1aa9adb91413
Create Date: 2014-02-13 15:14:23.916163
"""
# revision identifiers, used by Alembic.
revision = 'c1ac31c8fea'
down_revision = '491cdcf9f97c'
import datetime
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['crash_madu.sql', 'update_gccrashes.sql'])
op.execute(""" TRUNCATE gccrashes """)
op.alter_column(u'gccrashes', u'is_gc_count',
new_column_name=u'gc_count_madu', type_=sa.REAL())
now = datetime.datetime.utcnow()
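    # Re-run the gccrashes aggregation for each of the previous 29 days so
    # the renamed gc_count_madu column is repopulated with per-ADU values.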
for backfill_date in [
(now - datetime.timedelta(days=days)).strftime("%Y-%m-%d")
for days in range(1,30)]:
op.execute(""" SELECT backfill_gccrashes('%s') """ % backfill_date)
op.execute(""" COMMIT """)
def downgrade():
load_stored_proc(op, ['update_gccrashes.sql'])
op.execute(""" DROP FUNCTION crash_madu(bigint, numeric, numeric) """)
op.alter_column(u'gccrashes', u'gc_count_madu',
new_column_name=u'is_gc_count', type_=sa.INT())
| mpl-2.0 |
jfhumann/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/origin_check_wsh.py | 516 | 1992 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This example is derived from test/testdata/handlers/origin_check_wsh.py.
def web_socket_do_extra_handshake(request):
if request.ws_origin == 'http://example.com':
return
raise ValueError('Unacceptable origin: %r' % request.ws_origin)
def web_socket_transfer_data(request):
request.connection.write('origin_check_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
clones/python-blinker | blinker/_utilities.py | 3 | 4253 | from weakref import ref
from blinker._saferef import BoundMethodWeakref
try:
callable
except NameError:
def callable(object):
return hasattr(object, '__call__')
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
try:
from contextlib import contextmanager
except ImportError:
def contextmanager(fn):
def oops(*args, **kw):
raise RuntimeError("Python 2.5 or above is required to use "
"context managers.")
oops.__name__ = fn.__name__
return oops
class _symbol(object):
def __init__(self, name):
"""Construct a new named symbol."""
self.__name__ = self.name = name
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
foo
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
"""
symbols = {}
def __new__(cls, name):
try:
return cls.symbols[name]
except KeyError:
return cls.symbols.setdefault(name, _symbol(name))
def hashable_identity(obj):
if hasattr(obj, 'im_func'):
return (id(obj.im_func), id(obj.im_self))
else:
return id(obj)
WeakTypes = (ref, BoundMethodWeakref)
class annotatable_weakref(ref):
"""A weakref.ref that supports custom instance attributes."""
def reference(object, callback=None, **annotations):
"""Return an annotated weak ref."""
if callable(object):
weak = callable_reference(object, callback)
else:
weak = annotatable_weakref(object, callback)
for key, value in annotations.items():
setattr(weak, key, value)
return weak
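# Illustrative sketch (the function name and annotation key are hypothetical):
# a weak reference to a plain function goes through annotatable_weakref, and
# extra keyword arguments become attributes on the returned reference.
#
#   def on_signal(sender):
#       pass
#   weak = reference(on_signal, note='extra metadata')
#   weak.note             # -> 'extra metadata'
#   weak() is on_signal   # -> True while on_signal is still alive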
def callable_reference(object, callback=None):
"""Return an annotated weak ref, supporting bound instance methods."""
if hasattr(object, 'im_self') and object.im_self is not None:
return BoundMethodWeakref(target=object, on_delete=callback)
elif hasattr(object, '__self__') and object.__self__ is not None:
return BoundMethodWeakref(target=object, on_delete=callback)
return annotatable_weakref(object, callback)
class lazy_property(object):
"""A @property that is only evaluated once."""
def __init__(self, deferred):
self._deferred = deferred
self.__doc__ = deferred.__doc__
def __get__(self, obj, cls):
if obj is None:
return self
value = self._deferred(obj)
setattr(obj, self._deferred.__name__, value)
return value
| mit |
sos22/minios-hacks | tools/xm-test/tests/network/07_network_dom0_udp_pos.py | 42 | 1550 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Author: <[email protected]>
# UDP tests to dom0.
# - determines dom0 network
# - creates a single guest domain
# - sets up a single NIC on same subnet as dom0
# - conducts hping2 udp tests to the dom0 IP address
# hping2 $dom0_IP -2 -c 1 -d $size
# where $size = 1, 48, 64, 512, 1440, 1500, 1505,
#               4096, 4192, 32767, 65495
trysizes = [ 1, 48, 64, 512, 1440, 1500, 1505, 4096, 4192,
32767, 65495 ]
from XmTestLib import *
rc = 0
# Test creates 1 domain, which requires 2 ips: 1 for the domains and 1 for
# aliases on dom0
if xmtest_netconf.canRunNetTest(2) == False:
SKIP("Don't have enough free configured IPs to run this test")
# Fire up a guest domain w/1 nic
domain = XmTestDomain()
domain.newDevice(XenNetDevice, "eth0")
try:
console = domain.start()
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
try:
# Ping dom0
fails=""
netdev = domain.getDevice("eth0")
dom0ip = netdev.getDom0AliasIP()
for size in trysizes:
out = console.runCmd("hping2 " + dom0ip + " -E /dev/urandom -2 -q -c 20"
+ " --fast -d " + str(size) + " -N " + str(size))
if out["return"]:
fails += " " + str(size)
print out["output"]
except ConsoleError, e:
FAIL(str(e))
domain.stop()
if len(fails):
FAIL("UDP hping2 to dom0 failed for size" + fails + ".")
| gpl-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/datasets/samples_generator.py | 7 | 56557 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for and remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
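# A minimal usage sketch (the values simply restate the documented defaults):
#   >>> X, y = make_classification(n_samples=100, n_features=20,
#   ...                            n_informative=2, n_redundant=2,
#   ...                            random_state=0)
#   >>> X.shape
#   (100, 20)
#   >>> set(y).issubset({0, 1})   # binary labels by default
#   True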
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
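# A minimal usage sketch (the sizes below are arbitrary illustrations): with
# the default dense indicator output, Y has one column per class.
#   >>> X, Y = make_multilabel_classification(n_samples=5, n_features=10,
#   ...                                        n_classes=3, random_state=0)
#   >>> X.shape, Y.shape
#   ((5, 10), (5, 3))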
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
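# A minimal usage sketch: the labels follow the documented threshold rule
# y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1.
#   >>> X, y = make_hastie_10_2(n_samples=1000, random_state=0)
#   >>> X.shape
#   (1000, 10)
#   >>> np.all((y == 1) == ((X ** 2).sum(axis=1) > 9.34))
#   True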
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
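# A minimal usage sketch: with no noise, class 0 sits on the unit circle and
# class 1 on a circle of radius `factor`.
#   >>> X, y = make_circles(n_samples=100, factor=0.3, random_state=0)
#   >>> r = np.sqrt((X ** 2).sum(axis=1))
#   >>> np.allclose(r[y == 0], 1.0), np.allclose(r[y == 1], 0.3)
#   (True, True)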
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    # labels must follow the stacking order above: the first n_samples_out
    # points are the outer arc, the remaining n_samples_in points the inner arc
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
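# A minimal usage sketch (the noise value is an arbitrary illustration):
#   >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
#   >>> X.shape
#   (100, 2)
#   >>> sorted(set(y))
#   [0, 1]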
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
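# A minimal usage sketch: with noise=0.0 the targets reproduce the documented
# formula exactly, and only the first five features matter.
#   >>> X, y = make_friedman1(n_samples=200, n_features=10, noise=0.0,
#   ...                       random_state=0)
#   >>> expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
#   ...             + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
#   >>> np.allclose(y, expected)
#   True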
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
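# A minimal usage sketch (sizes are arbitrary illustrations): inspecting the
# singular values shows the bell-shaped profile around `effective_rank` plus
# the fat tail controlled by `tail_strength`.
#   >>> X = make_low_rank_matrix(n_samples=100, n_features=50,
#   ...                          effective_rank=5, random_state=0)
#   >>> s = np.linalg.svd(X, compute_uv=False)   # decays fast, then slowly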
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
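# A minimal usage sketch: the returned signal is exactly the dictionary times
# the sparse code, with n_nonzero_coefs active entries per column of the code.
#   >>> Y, D, X = make_sparse_coded_signal(n_samples=20, n_components=15,
#   ...                                    n_features=10, n_nonzero_coefs=3,
#   ...                                    random_state=0)
#   >>> np.allclose(Y, np.dot(D, X))
#   True
#   >>> np.all((X != 0).sum(axis=0) == 3)
#   True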
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al [1]. as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
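# A minimal usage sketch: the result is symmetric with strictly positive
# eigenvalues.
#   >>> A = make_spd_matrix(n_dim=4, random_state=0)
#   >>> np.allclose(A, A.T), bool(np.all(np.linalg.eigvalsh(A) > 0))
#   (True, True)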
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
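# A minimal usage sketch: classes are concentric shells cut at quantiles of the
# squared distance from `mean`, so the class sizes come out (nearly) equal.
#   >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
#   ...                                n_classes=3, random_state=0)
#   >>> np.bincount(y)
#   array([30, 30, 30])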
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
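# A minimal usage sketch (shape and cluster count are arbitrary illustrations):
#   >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
#   ...                                    shuffle=False, random_state=0)
#   >>> data.shape, rows.shape, cols.shape
#   ((30, 20), (3, 30), (3, 20))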
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| mit |
ChristosChristofidis/twitter_nlp | hbc/python/evalLLda.py | 10 | 5418 | #!/usr/bin/python
import sys
import re
from optparse import OptionParser
from LdaFeatures import LdaFeatures
parser = OptionParser()
parser.add_option("--gold", dest='gold', default='gold')
parser.add_option("--dictionaries", dest='dictionaries', default='dictionaries')
parser.add_option("--dictmap", dest='dictmap', default='dict-label')
parser.add_option("--labelmap", dest='labelmap', default=None)
parser.add_option("--entityTopic", dest='entityTopic', default='entity-topic')
parser.add_option("--entities", dest='entities', default='entities')
parser.add_option("--fbBaseline", action='store_true', dest='fbBaseline', default=False)
parser.add_option("--predictedSeg", dest='predictedSeg', default=None)
parser.add_option("--pInc", action='store_true', dest='pInc', default=False)
parser.add_option("--threshold", dest='threshold', type='float', default=0.0)
(options, args) = parser.parse_args()
#Read in dictionaries
dictionaries = []
for line in open(options.dictionaries):
dictionaries.append(line.rstrip('\n'))
#Read in mappings from dictionaries to labels
dict2label = {}
for line in open(options.dictmap):
(d, l) = line.rstrip('\n').split(' ')
dict2label[d] = l
label2label = None
if options.labelmap:
label2label = {}
for line in open(options.labelmap):
(l1, l2) = line.rstrip('\n').split(' ')
label2label[l1] = l2
def MapLabel(label):
if label2label and label2label.has_key(label):
return label2label[label]
else:
return label
#Read in the gold labels
gold = []
for line in open(options.gold):
gold.append(MapLabel(line.rstrip('\n')))
#Read in entities
entities = []
for line in open(options.entities):
entities.append(line.rstrip('\n'))
#Read in labels
labels = []
for line in open('labels'):
labels.append([int(x) for x in line.rstrip('\n').split(' ')])
#Read in predicted segmentation
goldTypeCounts = None
if options.predictedSeg:
goldTypeCounts = {}
for line in open(options.predictedSeg):
(word, predicted, g) = line.rstrip('\n').split('\t')
if g[0:2] == "B-":
#print g
g = MapLabel(g[2:])
print g
if g != 'error':
goldTypeCounts[g] = goldTypeCounts.get(g,0.0) + 1.0
goldTypeCounts['all'] = goldTypeCounts.get('all',0.0) + 1.0
print goldTypeCounts
#Read in predictions and probabilities
i = 0
pred = []
dictLabelCnt = {}
confusionMatrix = {}
tp = {}
fp = {}
tn = {}
fn = {}
n = {}
for line in open(options.entityTopic):
fields = line.rstrip('\n').split('\t')
counts = [float(x.split(':')[1]) for x in fields[1:]]
dictIds = [int(x.split(':')[0]) for x in fields[1:]]
dictionary = dictionaries[dictIds[0]-1]
if options.pInc:
p = counts[0]
else:
p = counts[0] / sum(counts)
# if dict2label[dictionary] == 'NONE' and len(dictIds) > 1:
# dictionary = dictionaries[dictIds[1]-1]
# p = counts[1] / sum(counts)
if dict2label[dictionary] != gold[i]:
pred.append((entities[i],dictionary,dict2label[dictionary],gold[i],p))
if gold[i] != 'error':
dictLabelCnt["%s\t%s" % (dictionary, gold[i])] = dictLabelCnt.get("%s\t%s" % (dictionary, gold[i]),0) + 1
confusionMatrix["%s\t%s" % (dict2label[dictionary], gold[i])] = confusionMatrix.get("%s\t%s" % (dict2label[dictionary], gold[i]),0) + 1
if gold[i] != 'error':
n[gold[i]] = n.get(gold[i],0.0) + 1.0
n['all'] = n.get('all',0.0) + 1.0
if (options.fbBaseline and (sum(labels[i]) == 1)) or (not options.fbBaseline and p >= options.threshold and dict2label[dictionary] != 'NONE'):
if dict2label[dictionary] == gold[i]:
tp[dict2label[dictionary]] = tp.get(dict2label[dictionary],0.0) + 1.0
tp['all'] = tp.get('all',0.0) + 1.0
else:
fp[dict2label[dictionary]] = fp.get(dict2label[dictionary],0.0) + 1.0
fp['all'] = fp.get('all',0.0) + 1.0
if gold[i] != 'error':
fn[gold[i]] = fn.get(gold[i],0.0) + 1.0
fn['all'] = fn.get('all',0.0) + 1.0
else:
#print "%s\t%s\t%s" % (dictionary, dict2label[dictionary], gold[i])
if gold[i] != 'error':
fn[gold[i]] = fn.get(gold[i],0.0) + 1.0
fn['all'] = fn.get('all',0.0) + 1.0
i += 1
print "\n".join([str(x) for x in pred])
for pair in sorted(dictLabelCnt.keys(), cmp=lambda a,b: cmp(dictLabelCnt[b], dictLabelCnt[a])):
print "%s\t%s" % (pair, str(dictLabelCnt[pair]))
print "\nConfusion matrix:"
for pair in sorted(confusionMatrix.keys(), cmp=lambda a,b: cmp(confusionMatrix[b], confusionMatrix[a])):
(pred, gold) = pair.split('\t')
print "%s\t%s\t%s\t%s\t%s" % (gold, pred, str(confusionMatrix[pair]), confusionMatrix[pair] / n.get(gold,-1), n.get(gold,-1))
if goldTypeCounts:
n = goldTypeCounts
for t in sorted(tp.keys(), lambda a,b: cmp(n[b],n[a])):
p = tp.get(t,0.0) / (tp.get(t,0.0) + fp.get(t,0.0))
if options.predictedSeg:
#r = tp.get(t,0.0) / (tp.get(t,0.0) + goldTypeCounts[t])
r = tp.get(t,0.0) / (goldTypeCounts[t])
f = 2 * p * r / (p + r)
print "%s\tP=%s\tR=%s\tF=%s\tN=%s" % (t, p, r, f, goldTypeCounts[t])
else:
r = tp.get(t,0.0) / (tp.get(t,0.0) + fn.get(t,0.0))
f = 2 * p * r / (p + r)
print "%s\tP=%s\tR=%s\tF=%s\tN=%s" % (t, p, r, f, n[t])
| gpl-3.0 |
ocelot-collab/ocelot | ocelot_gui/app/ui_forms/sim_twiss.py | 1 | 10760 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'app/ui_forms/sim_twiss.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form_Twiss(object):
def setupUi(self, Form_Twiss):
Form_Twiss.setObjectName("Form_Twiss")
Form_Twiss.resize(1200, 750)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form_Twiss.sizePolicy().hasHeightForWidth())
Form_Twiss.setSizePolicy(sizePolicy)
Form_Twiss.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.gridLayout = QtWidgets.QGridLayout(Form_Twiss)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setContentsMargins(-1, 0, -1, 0)
self.verticalLayout_5.setSpacing(6)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.groupBox = QtWidgets.QGroupBox(Form_Twiss)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_8.setContentsMargins(0, 6, 0, 0)
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, -1, -1, 6)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.plus_button = QtWidgets.QPushButton(self.groupBox)
self.plus_button.setMaximumSize(QtCore.QSize(60, 16777215))
self.plus_button.setObjectName("plus_button")
self.horizontalLayout_4.addWidget(self.plus_button)
self.edit_delta = QtWidgets.QDoubleSpinBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edit_delta.sizePolicy().hasHeightForWidth())
self.edit_delta.setSizePolicy(sizePolicy)
self.edit_delta.setMaximumSize(QtCore.QSize(100, 16777215))
self.edit_delta.setDecimals(3)
self.edit_delta.setMinimum(-9999999999.0)
self.edit_delta.setMaximum(9999999999.99)
self.edit_delta.setSingleStep(0.1)
self.edit_delta.setProperty("value", 1.0)
self.edit_delta.setObjectName("edit_delta")
self.horizontalLayout_4.addWidget(self.edit_delta)
self.minus_button = QtWidgets.QPushButton(self.groupBox)
self.minus_button.setMaximumSize(QtCore.QSize(60, 16777215))
self.minus_button.setObjectName("minus_button")
self.horizontalLayout_4.addWidget(self.minus_button)
self.verticalLayout_7.addLayout(self.horizontalLayout_4)
self.scroll = QtWidgets.QScrollArea(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scroll.sizePolicy().hasHeightForWidth())
self.scroll.setSizePolicy(sizePolicy)
self.scroll.setMinimumSize(QtCore.QSize(300, 0))
self.scroll.setMaximumSize(QtCore.QSize(300, 16777215))
self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scroll.setWidgetResizable(True)
self.scroll.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.scroll.setObjectName("scroll")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 284, 534))
self.scrollAreaWidgetContents.setAutoFillBackground(False)
self.scrollAreaWidgetContents.setStyleSheet("background-color: rgb(255, 255, 255);")
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayoutWidget = QtWidgets.QWidget(self.scrollAreaWidgetContents)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 281, 80))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.widgetArea = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.widgetArea.setContentsMargins(0, 0, 0, 2)
self.widgetArea.setSpacing(0)
self.widgetArea.setObjectName("widgetArea")
self.scroll.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_7.addWidget(self.scroll)
self.verticalLayout_8.addLayout(self.verticalLayout_7)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(-1, 6, -1, 6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_3 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setObjectName("label_3")
self.horizontalLayout_6.addWidget(self.label_3)
self.edit_tws_step = QtWidgets.QDoubleSpinBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edit_tws_step.sizePolicy().hasHeightForWidth())
self.edit_tws_step.setSizePolicy(sizePolicy)
self.edit_tws_step.setMaximumSize(QtCore.QSize(100, 16777215))
self.edit_tws_step.setDecimals(6)
self.edit_tws_step.setMaximum(9999999999.0)
self.edit_tws_step.setSingleStep(0.1)
self.edit_tws_step.setObjectName("edit_tws_step")
self.horizontalLayout_6.addWidget(self.edit_tws_step)
self.verticalLayout_8.addLayout(self.horizontalLayout_6)
self.verticalLayout_5.addWidget(self.groupBox)
self.calc_params = QtWidgets.QPushButton(Form_Twiss)
self.calc_params.setObjectName("calc_params")
self.verticalLayout_5.addWidget(self.calc_params)
self.calc_matching = QtWidgets.QPushButton(Form_Twiss)
self.calc_matching.setObjectName("calc_matching")
self.verticalLayout_5.addWidget(self.calc_matching)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(-1, 0, 0, -1)
self.horizontalLayout_5.setSpacing(6)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.btn1 = QtWidgets.QPushButton(Form_Twiss)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn1.sizePolicy().hasHeightForWidth())
self.btn1.setSizePolicy(sizePolicy)
self.btn1.setObjectName("btn1")
self.horizontalLayout_5.addWidget(self.btn1)
self.btn2 = QtWidgets.QPushButton(Form_Twiss)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn2.sizePolicy().hasHeightForWidth())
self.btn2.setSizePolicy(sizePolicy)
self.btn2.setObjectName("btn2")
self.horizontalLayout_5.addWidget(self.btn2)
self.verticalLayout_5.addLayout(self.horizontalLayout_5)
self.gridLayout.addLayout(self.verticalLayout_5, 1, 1, 1, 1)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setContentsMargins(0, -1, -1, -1)
self.verticalLayout_6.setSpacing(6)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label = QtWidgets.QLabel(Form_Twiss)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.verticalLayout_6.addWidget(self.label)
self.tws_plot_widget = QtWidgets.QWidget(Form_Twiss)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tws_plot_widget.sizePolicy().hasHeightForWidth())
self.tws_plot_widget.setSizePolicy(sizePolicy)
self.tws_plot_widget.setMinimumSize(QtCore.QSize(850, 700))
self.tws_plot_widget.setObjectName("tws_plot_widget")
self.verticalLayout_6.addWidget(self.tws_plot_widget)
self.gridLayout.addLayout(self.verticalLayout_6, 1, 0, 1, 1)
self.retranslateUi(Form_Twiss)
QtCore.QMetaObject.connectSlotsByName(Form_Twiss)
def retranslateUi(self, Form_Twiss):
_translate = QtCore.QCoreApplication.translate
Form_Twiss.setWindowTitle(_translate("Form_Twiss", "Form"))
self.groupBox.setTitle(_translate("Form_Twiss", "Tuning elements list"))
self.plus_button.setText(_translate("Form_Twiss", "+"))
self.minus_button.setText(_translate("Form_Twiss", "-"))
self.label_3.setText(_translate("Form_Twiss", "Twiss function step, m"))
self.calc_params.setText(_translate("Form_Twiss", "Calculate Main Parameters"))
self.calc_matching.setText(_translate("Form_Twiss", "Matching"))
self.btn1.setText(_translate("Form_Twiss", "Update"))
self.btn2.setText(_translate("Form_Twiss", "Reset"))
self.label.setText(_translate("Form_Twiss", "Twiss functions"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form_Twiss = QtWidgets.QWidget()
ui = Ui_Form_Twiss()
ui.setupUi(Form_Twiss)
Form_Twiss.show()
sys.exit(app.exec_())
| gpl-3.0 |
davidhstocker/Graphyne | Config/Test/TestRepository/EventInitRemove.py | 1 | 1141 | '''
Created on Dec 13, 2016
@author: David Stocker
'''
import Graphyne.Scripting
import Graphyne.Graph
class OnInitialize(Graphyne.Scripting.StateEventScript):
"""
This class tests the initialize event. As this event has no return mechanism, we'll test it
by adding a property. The EventInitRemove.InitRemoveEventTest entity that this script will
be attached to has no properties. So we'll add a property during script execution, which we
can then check for later.
"""
def execute(self, selfUUID, params):
Graphyne.Graph.api.addEntityStringProperty(selfUUID, "AProp", "Hello")
class OnDelete(Graphyne.Scripting.StateEventScript):
"""
This class tests the terminate event. It echos 'Hello World' back.
"""
def execute(self, selfUUID, params):
'''
Echo the string "Hello World", where the "Hello" part comes from the AProp property added in the OnInitialize script.
'''
hello = Graphyne.Graph.api.getEntityPropertyValue(selfUUID, "AProp")
return "%s World" %(hello)
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.5/django/contrib/auth/management/__init__.py | 70 | 6746 | """
Creates permissions for all installed apps that need permissions.
"""
from __future__ import unicode_literals
import getpass
import locale
import unicodedata
from django.contrib.auth import models as auth_app, get_user_model
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_models, signals
from django.utils import six
from django.utils.six.moves import input
def _get_permission_codename(action, opts):
return '%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts, ctype):
"""
Returns (codename, name) for all permissions in the given opts.
"""
builtin = _get_builtin_permissions(opts)
custom = list(opts.permissions)
_check_permission_clashing(custom, builtin, ctype)
return builtin + custom
def _get_builtin_permissions(opts):
"""
Returns (codename, name) for all autogenerated permissions.
"""
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts),
'Can %s %s' % (action, opts.verbose_name_raw)))
return perms
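# For example (illustrative model name), for a model class named ``Poll`` with
# verbose_name "poll" this returns:
#   [('add_poll', 'Can add poll'),
#    ('change_poll', 'Can change poll'),
#    ('delete_poll', 'Can delete poll')]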
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
pool.add(codename)
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
    # Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
def create_superuser(app, created_models, verbosity, db, **kwargs):
from django.core.management import call_command
UserModel = get_user_model()
if UserModel in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True, database=db)
break
def get_system_username():
"""
Try to determine the current system user's username.
:returns: The username as a unicode string, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (ImportError, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
if not six.PY3:
try:
default_locale = locale.getdefaultlocale()[1]
except ValueError:
return ''
if not default_locale:
return ''
try:
result = result.decode(default_locale)
except UnicodeDecodeError:
# UnicodeDecodeError - preventive treatment for non-latin Windows.
return ''
return result
def get_default_username(check_db=True):
"""
Try to determine the current system user's username to use as a default.
:param check_db: If ``True``, requires that the username does not match an
existing ``auth.User`` (otherwise returns an empty string).
:returns: The username, or an empty string if no username can be
determined.
"""
# If the User model has been swapped out, we can't make any assumptions
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = unicodedata.normalize('NFKD', default_username)\
.encode('ascii', 'ignore').decode('ascii').replace(' ', '').lower()
except UnicodeDecodeError:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
return default_username
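# Illustrative usage (not part of the original module): this helper backs the
# suggested username in the interactive ``createsuperuser`` prompt, and can be
# checked directly once settings are configured. The sample output is made up.
#
#   >>> from django.contrib.auth.management import get_default_username
#   >>> get_default_username(check_db=False)
#   'jsmith'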
signals.post_syncdb.connect(create_permissions,
dispatch_uid="django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid="django.contrib.auth.management.create_superuser")
| lgpl-3.0 |
LambdaCast/LambdaCast | portal/migrations/0018_auto__add_field_video_audioThumbURL.py | 1 | 11900 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Video.audioThumbURL'
db.add_column('portal_video', 'audioThumbURL',
self.gf('django.db.models.fields.URLField')(default='', max_length=200, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Video.audioThumbURL'
db.delete_column('portal_video', 'audioThumbURL')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'portal.channel': {
'Meta': {'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'portal.collection': {
'Meta': {'object_name': 'Collection'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['portal.Video']", 'symmetrical': 'False'})
},
'portal.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'timecode': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Video']"})
},
'portal.hotfolder': {
'Meta': {'object_name': 'Hotfolder'},
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'defaultName': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'folderName': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'portal.video': {
'Meta': {'object_name': 'Video'},
'assemblyid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'audioThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encodingDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'linkURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mp3Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mp4Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'oggSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'originalFile': ('django.db.models.fields.files.FileField', [], {'max_length': '2048', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'torrentDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'webmSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['portal'] | bsd-2-clause |
krujos/strava-private-to-public | private-to-public.py | 1 | 3601 | """
Really could have implemented this all in javascript on the client side...
"""
from __future__ import print_function
import requests
from flask import Flask, redirect, url_for, request, session, abort, jsonify
import os
import sys
import logging
import json
STRAVA_CLIENT_ID = 1367
Flask.get = lambda self, path: self.route(path, methods=['get'])
Flask.put = lambda self, path: self.route(path, methods=['put'])
Flask.post = lambda self, path: self.route(path, methods=['post'])
Flask.delete = lambda self, path: self.route(path, methods=['delete'])
app = Flask(__name__)
if not os.environ.get("CLIENT_SECRET"):
print("ERROR: CLIENT_SECRET is not defined", file=sys.stderr)
exit(1)
client_secret = os.environ.get("CLIENT_SECRET")
Flask.secret_key = client_secret
app.secret_key = client_secret
redirect_url = "http://127.0.0.1:5000"
@app.get('/')
def index():
"""Return static content, index.html only, or handle callbacks."""
#Call back from Strava for token exchange.
if request.args.get('code'):
code = request.args.get('code')
session.permanent = True
session['CODE'] = code
app.logger.debug("Code = %s " % code)
get_token(request.args.get('code'))
return redirect(url_for('static', filename='loggedin.html'))
return redirect(url_for('static', filename='index.html'))
def get_token(code):
data = {"client_id": STRAVA_CLIENT_ID,
"client_secret": client_secret,
"code": code}
url = 'https://www.strava.com/oauth/token'
app.logger.debug("Post URL = %s" % url)
response = requests.post(url, data=data)
app.logger.info("Login post returned %d" % response.status_code)
app.logger.debug(response.json())
session['token'] = response.json()['access_token']
athlete = response.json()['athlete']
session['athlete_id'] = athlete['id']
session['athlete_name'] = athlete['firstname'] + " " + athlete['lastname']
@app.get('/athlete')
def get_current_user():
try:
return jsonify({"id": session['athlete_id'],
"name": session['athlete_name']})
except KeyError:
abort(404)
@app.get('/login')
def login():
return "https://www.strava.com/oauth/authorize?client_id=%s&response_type=code&redirect_uri=%s&scope=view_private,write" \
% (STRAVA_CLIENT_ID, redirect_url)
@app.get('/rides/<page>')
@app.get('/rides')
def get_rides(page=1):
"""Attempt to get all of a users rides so we can filter out the private ones"""
url = "https://www.strava.com/api/v3/athlete/activities"
data = {"per_page": 50, "page": page, "access_token": session['token']}
response = requests.get(url, data=data)
app.logger.debug("Strava return code = %d" % response.status_code)
app.logger.debug(response.json())
return json.dumps(response.json())#there has to be a better way.
@app.put('/ride/<ride_id>')
def update_ride(ride_id):
ride = request.json
app.logger.debug(ride)
if int(ride['id']) != int(ride_id):
abort(400)
app.logger.debug("Updating ride " + ride_id)
data = {"access_token": session['token']}
params = {"private": int(ride['private']), "trainer": int(ride['trainer'])}
url = "https://www.strava.com/api/v3/activities/" + ride_id
response = requests.put(url, data=data, params=params)
app.logger.debug(response.status_code)
return json.dumps(response.json())
if __name__ == '__main__':
app.logger.setLevel(logging.INFO)
file_handler = logging.FileHandler('strava.log')
app.logger.addHandler(file_handler)
app.run()
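# A rough sketch of the intended flow, assuming the app runs locally on
# http://127.0.0.1:5000 (the redirect_url above) with CLIENT_SECRET exported:
#
#   1. GET  /login        -> returns the Strava OAuth authorize URL to visit
#   2. Strava redirects back to "/" with ?code=...; get_token() exchanges it
#      for an access token stored in the session
#   3. GET  /rides        -> first page (50 activities) as JSON
#   4. PUT  /ride/<id>    -> body such as {"id": <id>, "private": 0, "trainer": 0}
#                            to flip an activity from private to public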
| apache-2.0 |
boxu0001/practice | py3/S1604_time.py | 1 | 2759 | '''
1604. Alert Using Same Key-Card Three or More Times in a One Hour Period
Medium
LeetCode company workers use key-cards to unlock office doors. Each time a worker uses their key-card, the security system saves the worker's name and the time when it was used. The system emits an alert if any worker uses the key-card three or more times in a one-hour period.
You are given a list of strings keyName and keyTime where [keyName[i], keyTime[i]] corresponds to a person's name and the time when their key-card was used in a single day.
Access times are given in the 24-hour time format "HH:MM", such as "23:51" and "09:49".
Return a list of unique worker names who received an alert for frequent keycard use. Sort the names in ascending order alphabetically.
Notice that "10:00" - "11:00" is considered to be within a one-hour period, while "22:51" - "23:52" is not considered to be within a one-hour period.
Example 1:
Input: keyName = ["daniel","daniel","daniel","luis","luis","luis","luis"], keyTime = ["10:00","10:40","11:00","09:00","11:00","13:00","15:00"]
Output: ["daniel"]
Explanation: "daniel" used the keycard 3 times in a one-hour period ("10:00","10:40", "11:00").
Example 2:
Input: keyName = ["alice","alice","alice","bob","bob","bob","bob"], keyTime = ["12:01","12:00","18:00","21:00","21:20","21:30","23:00"]
Output: ["bob"]
Explanation: "bob" used the keycard 3 times in a one-hour period ("21:00","21:20", "21:30").
Example 3:
Input: keyName = ["john","john","john"], keyTime = ["23:58","23:59","00:01"]
Output: []
Example 4:
Input: keyName = ["leslie","leslie","leslie","clare","clare","clare","clare"], keyTime = ["13:00","13:20","14:00","18:00","18:51","19:30","19:49"]
Output: ["clare","leslie"]
Constraints:
1 <= keyName.length, keyTime.length <= 10^5
keyName.length == keyTime.length
keyTime[i] is in the format "HH:MM".
[keyName[i], keyTime[i]] is unique.
1 <= keyName[i].length <= 10
keyName[i] contains only lowercase English letters.
'''
from typing import List


class Solution:
    def alertNames(self, keyName: List[str], keyTime: List[str]) -> List[str]:
        # Group each worker's key-card uses as minutes since midnight.
        nameTimes = {}
        result = []
        for i, name in enumerate(keyName):
            h, m = keyTime[i].split(":")
            if name not in nameTimes:
                nameTimes[name] = [int(h)*60 + int(m)]
            else:
                nameTimes[name] += [int(h)*60 + int(m)]
        # A worker triggers an alert if any three consecutive uses (after
        # sorting) fall within a 60-minute window.
        for name in nameTimes:
            times = sorted(nameTimes[name])
            find = False
            lnt = len(times)
            for ti in range(lnt-2):
                if times[ti+2] - times[ti] <= 60:
                    find = True
                    break
            if find:
                result += [name]
        return sorted(result)
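# A small usage sketch based on Example 1 from the problem statement above:
#
#   names = ["daniel", "daniel", "daniel", "luis", "luis", "luis", "luis"]
#   times = ["10:00", "10:40", "11:00", "09:00", "11:00", "13:00", "15:00"]
#   Solution().alertNames(names, times)   # -> ["daniel"]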
| gpl-3.0 |
craighiller/serendipity | venv/lib/python2.7/site-packages/setuptools/command/build_ext.py | 286 | 11854 | from distutils.command.build_ext import build_ext as _du_build_ext
try:
# Attempt to use Pyrex for building extensions, if available
from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
import os, sys
from distutils.file_util import copy_file
from setuptools.extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
try:
# Python 2.7 or >=3.2
from sysconfig import _CONFIG_VARS
except ImportError:
from distutils.sysconfig import get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
del get_config_var
from distutils.sysconfig import _config_vars as _CONFIG_VARS
from distutils import log
from distutils.errors import *
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
from dl import RTLD_NOW
have_rtld = True
use_stubs = True
except ImportError:
pass
def if_dl(s):
if have_rtld:
return s
return ''
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,os.path.basename(filename))
src_filename = os.path.join(self.build_lib,filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'):
# Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
def swig_sources(self, sources, *otherargs):
# first do any Pyrex processing
sources = _build_ext.swig_sources(self, sources) or sources
# Then do any actual SWIG stuff on the remainder
return _du_build_ext.swig_sources(self, sources, *otherargs)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self,fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext,Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = ext._links_to_dynamic = \
self.shlibs and self.links_to_dynamic(ext) or False
ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib,filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext,Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self,ext)
def build_extension(self, ext):
_compiler = self.compiler
try:
if isinstance(ext,Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self,ext)
if ext._needs_stub:
self.write_stub(
self.get_finalized_command('build_py').build_lib, ext
)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
def get_outputs(self):
outputs = _build_ext.get_outputs(self)
optimize = self.get_finalized_command('build_py').optimize
for ext in self.extensions:
if ext._needs_stub:
base = os.path.join(self.build_lib, *ext._full_name.split('.'))
outputs.append(base+'.py')
outputs.append(base+'.pyc')
if optimize:
outputs.append(base+'.pyo')
return outputs
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file+" already exists! Please delete.")
if not self.dry_run:
f = open(stub_file,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp"+if_dl(", dl"),
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name=='nt':
# Build shared libraries
#
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
): self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
#libraries=None, library_dirs=None, runtime_library_dirs=None,
#export_symbols=None, extra_preargs=None, extra_postargs=None,
#build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir,filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
| mit |
modulexcite/catapult | third_party/html5lib-python/utils/spider.py | 436 | 4157 | #!/usr/bin/env python
"""Spider to try and find bugs in the parser. Requires httplib2 and elementtree
usage:
import spider
s = spider.Spider()
s.spider("http://www.google.com", maxURLs=100)
"""
import urllib.request, urllib.error, urllib.parse
import urllib.robotparser
import hashlib
import httplib2
import html5lib
from html5lib.treebuilders import etree
class Spider(object):
def __init__(self):
self.unvisitedURLs = set()
self.visitedURLs = set()
self.buggyURLs=set()
self.robotParser = urllib.robotparser.RobotFileParser()
self.contentDigest = {}
self.http = httplib2.Http(".cache")
def run(self, initialURL, maxURLs=1000):
urlNumber = 0
self.visitedURLs.add(initialURL)
content = self.loadURL(initialURL)
while maxURLs is None or urlNumber < maxURLs:
if content is not None:
self.parse(content)
urlNumber += 1
if not self.unvisitedURLs:
break
content = self.loadURL(self.unvisitedURLs.pop())
def parse(self, content):
failed = False
p = html5lib.HTMLParser(tree=etree.TreeBuilder)
try:
tree = p.parse(content)
except:
self.buggyURLs.add(self.currentURL)
failed = True
print("BUGGY:", self.currentURL)
self.visitedURLs.add(self.currentURL)
if not failed:
self.updateURLs(tree)
def loadURL(self, url):
resp, content = self.http.request(url, "GET")
self.currentURL = url
        digest = hashlib.md5(content).hexdigest()
if digest in self.contentDigest:
content = None
self.visitedURLs.add(url)
else:
self.contentDigest[digest] = url
if resp['status'] != "200":
content = None
return content
def updateURLs(self, tree):
"""Take all the links in the current document, extract the URLs and
update the list of visited and unvisited URLs according to whether we
have seen them before or not"""
urls = set()
#Remove all links we have already visited
for link in tree.findall(".//a"):
try:
url = urllib.parse.urldefrag(link.attrib['href'])[0]
if (url and url not in self.unvisitedURLs and url
not in self.visitedURLs):
urls.add(url)
except KeyError:
pass
        #Remove all non-http URLs and add a suitable base URL where that is
#missing
newUrls = set()
for url in urls:
splitURL = list(urllib.parse.urlsplit(url))
if splitURL[0] != "http":
continue
if splitURL[1] == "":
splitURL[1] = urllib.parse.urlsplit(self.currentURL)[1]
newUrls.add(urllib.parse.urlunsplit(splitURL))
urls = newUrls
responseHeaders = {}
#Now we want to find the content types of the links we haven't visited
for url in urls:
try:
resp, content = self.http.request(url, "HEAD")
responseHeaders[url] = resp
            except (AttributeError, KeyError):
#Don't know why this happens
pass
#Remove links not of content-type html or pages not found
#XXX - need to deal with other status codes?
toVisit = set([url for url in urls if url in responseHeaders and
"html" in responseHeaders[url]['content-type'] and
responseHeaders[url]['status'] == "200"])
#Now check we are allowed to spider the page
        for url in list(toVisit):
robotURL = list(urllib.parse.urlsplit(url)[:2])
robotURL.extend(["robots.txt", "", ""])
robotURL = urllib.parse.urlunsplit(robotURL)
self.robotParser.set_url(robotURL)
if not self.robotParser.can_fetch("*", url):
toVisit.remove(url)
self.visitedURLs.update(urls)
self.unvisitedURLs.update(toVisit)
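# A minimal run sketch mirroring the module docstring (httplib2, html5lib and
# network access assumed; the URL is only an example):
#
#   s = Spider()
#   s.run("http://www.google.com", maxURLs=100)
#   print(s.buggyURLs)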
| bsd-3-clause |
r0balo/pelisalacarta | python/main-classic/channels/peliculasnu.py | 1 | 10999 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for peliculas.nu
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urllib
from core import config
from core import httptools
from core import jsontools
from core import logger
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu")
__perfil__ = int(config.get_setting("perfil", "peliculasnu"))
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = "http://peliculas.nu/"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
itemlist.append(item.clone(title="Novedades", action="entradas", url=host, fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="Más Vistas", action="entradas", url=host+"mas-vistas", fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="Mejor Valoradas", action="entradas", url=host+"mejor-valoradas", fanart="http://i.imgur.com/c3HS8kj.png"))
item.text_color = color2
itemlist.append(item.clone(title="En Español", action="entradas", url=host+"?s=Español", fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="En Latino", action="entradas", url=host+"?s=Latino", fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="En VOSE", action="entradas", url=host+"?s=VOSE", fanart="http://i.imgur.com/c3HS8kj.png"))
item.text_color = color3
itemlist.append(item.clone(title="Por género", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="Por letra", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url= "%s?s=%s" % (host, texto)
return entradas(item)
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.from_newest = True
item.action = "entradas"
itemlist = entradas(item)
if itemlist[-1].action == "entradas":
itemlist.pop()
    # Catch the exception so that a failing channel does not break the "newest" listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def entradas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="TPostMv">.*?href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' \
'.*?"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<.*?<p class="Idioma(.*?)</p>'
matches = scrapertools.find_multiple_matches(data, patron)
if item.extra == "next":
matches_ = matches[15:]
else:
matches_ = matches[:15]
for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad, data_idioma in matches_:
idiomas = []
if "/espm" in data_idioma:
idiomas.append("CAST")
if "/latinom" in data_idioma:
idiomas.append("LAT")
if "/vosemi" in data_idioma:
idiomas.append("VOSE")
titulo = "%s [%s]" % (scrapedtitle, calidad)
if idiomas:
titulo += " [%s]" % "/".join(idiomas)
scrapedthumbnail = scrapedthumbnail.replace("-160x242", "")
infolabels = {'year': year}
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
if not item.from_newest:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if not item.extra and len(matches) > 15:
itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
elif item.extra == "next":
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
return itemlist
def listado(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td class="MvTbImg">.*?href="([^"]+)".*?src="([^"]+)".*?<strong>([^<]+)<.*?' \
'.*?<td>(\d+).*?class="Qlty">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
if item.extra == "next":
matches_ = matches[15:]
else:
matches_ = matches[:15]
for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad in matches_:
titulo = "%s [%s]" % (scrapedtitle, calidad)
scrapedthumbnail = scrapedthumbnail.replace("-55x85", "")
infolabels = {'year': year}
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if not item.extra and len(matches) > 15:
itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
elif item.extra == "next":
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
return itemlist
def indices(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
if "letra" in item.title:
action = "listado"
bloque = scrapertools.find_single_match(data, '<ul class="AZList">(.*?)</ul>')
else:
action = "entradas"
bloque = scrapertools.find_single_match(data, 'Géneros</a>(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li.*?<a href="([^"]+)">([^<]+)</a>')
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action=action, url=scrapedurl, title=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
if not item.fanart and fanart:
item.fanart = fanart
patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for tipo, source, title in matches:
if tipo == "trailer":
continue
post = "source=%s&action=obtenerurl" % urllib.quote(source)
headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
data_url = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post, headers=headers).data
url = jsontools.load_json(data_url).get("url")
if "online.desmix" in url or "metiscs" in url:
server = "directo"
elif "openload" in url:
server = "openload"
url += "|Referer=" + item.url
else:
server = servertools.get_server_from_url(url)
title = "%s - %s" % (unicode(server, "utf8").capitalize().encode("utf8"), title)
itemlist.append(item.clone(action="play", url=url, title=title, server=server, text_color=color3))
if item.extra != "findvideos" and config.get_library_support():
itemlist.append(item.clone(title="Añadir película a la biblioteca", action="add_pelicula_to_library",
extra="findvideos", text_color="green"))
return itemlist
def play(item):
logger.info()
itemlist = []
if "drive.php?v=" in item.url:
if not item.url.startswith("http:"):
item.url = "http:" + item.url
data = httptools.downloadpage(item.url).data
subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
patron = '{"label":\s*"([^"]+)","type":\s*"video/([^"]+)","src":\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for calidad, extension, url in matches:
url = url.replace(",", "%2C")
title = ".%s %s [directo]" % (extension, calidad)
itemlist.append([title, url, 0, subtitulo])
try:
itemlist.sort(key=lambda it:int(it[0].split(" ")[1].split("p")[0]))
except:
pass
elif "metiscs" in item.url:
if not item.url.startswith("http:"):
item.url = "http:" + item.url
referer = {'Referer': "http://peliculas.nu"}
data = httptools.downloadpage(item.url, headers=referer).data
from lib import jsunpack
packed = scrapertools.find_single_match(data, '<script type="text/javascript">(eval\(function.*?)</script>')
data_js = jsunpack.unpack(packed)
patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
matches = scrapertools.find_multiple_matches(data_js, patron)
for url, calidad, extension in matches:
url = url.replace(",", "%2C")
title = ".%s %s [directo]" % (extension, calidad)
itemlist.insert(0, [title, url])
else:
enlaces = servertools.findvideosbyserver(item.url, item.server)[0]
if len(enlaces) > 0:
itemlist.append(item.clone(action="play", server=enlaces[2], url=enlaces[1]))
return itemlist
| gpl-3.0 |
Samsung/meminsight | lifetime-analysis/delta-staleness.py | 1 | 2439 | #
# Copyright (c) 2014 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import tempfile
import shutil
import subprocess
annex = True
def gen_wrapper_html(js_files,inline=None):
script_tags = ["<script src=\"%s\"></script>"%os.path.basename(x) for x in js_files]
# create dummy HTML file loading js_file in /tmp
html = "<html><head></head><body>"
if inline is not None:
html += "<script>" + inline + "</script>"
html += "%s</body></html>"%"".join(script_tags)
return html
def gen_wrapper_html_file(js_files, filename,inline=None):
html = gen_wrapper_html(js_files,inline)
dummy_file = open(filename, "w")
dummy_file.write(html)
dummy_file.close()
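# Illustrative sketch (not executed here): for two scripts, the wrapper HTML
# produced by gen_wrapper_html() looks roughly like
#   gen_wrapper_html(["/tmp/a.js", "/tmp/b.js"], inline="var x = 1;")
#   -> '<html><head></head><body><script>var x = 1;</script>'
#      '<script src="a.js"></script><script src="b.js"></script></body></html>'
# Only the basenames of the script paths end up in the src attributes.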
arg = os.path.abspath(sys.argv[1])
dir = os.path.dirname(arg)
if annex:
appTempDir = os.path.abspath("../../annex")
shutil.copy(arg,os.path.join(appTempDir,"combined.js"))
else:
appTempDir = tempfile.mkdtemp()
print "app temp dir " + appTempDir
shutil.copy(arg,appTempDir)
gen_wrapper_html_file([arg],os.path.join(appTempDir,'index.html'))
instTempDir = tempfile.mkdtemp()
print "inst temp dir " + instTempDir
genTraceCmd = ['node',
os.path.join(os.path.dirname(os.path.realpath(__file__)),'../memory-trace/drivers/memTraceDriver.js'),
'--outputDir',
instTempDir,
appTempDir]
sp = subprocess.Popen(genTraceCmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
print out
print >> sys.stderr, err
refCountCmd = "java -ea -Dtesting=yes -jar build/libs/memory-analysis-v2-all.jar --staleness --trace".split(' ')
refCountCmd.append(os.path.join(instTempDir,os.path.basename(appTempDir),'mem-trace'))
sp = subprocess.Popen(refCountCmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
print out
print >> sys.stderr, err
if not annex:
shutil.rmtree(appTempDir)
shutil.rmtree(instTempDir)
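# Typical invocation (inferred from the sys.argv usage above; paths will vary):
#   python delta-staleness.py /path/to/combined.js
# The script first instruments the file via the node memTraceDriver, then feeds
# the resulting mem-trace to the Java staleness-analysis jar.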
| apache-2.0 |
andymckay/addons-server | src/olympia/files/helpers.py | 2 | 15973 | import codecs
import mimetypes
import os
import stat
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_unicode
from django.utils.translation import get_language, ugettext as _
from django.template.defaultfilters import filesizeformat
from validator.testcases.packagelayout import (
blacklisted_extensions, blacklisted_magic_numbers)
import jinja2
import commonware.log
from cache_nuggets.lib import memoize, Message
from jingo import register, get_env
from olympia import amo
from olympia.amo.utils import rm_local_tmp_dir
from olympia.amo.urlresolvers import reverse
from olympia.files.utils import extract_xpi, get_md5
# Allow files with a shebang through.
blacklisted_magic_numbers = [b for b in list(blacklisted_magic_numbers)
if b != (0x23, 0x21)]
blacklisted_extensions = [b for b in list(blacklisted_extensions)
if b != 'sh']
task_log = commonware.log.getLogger('z.task')
@register.function
def file_viewer_class(value, key):
result = []
if value['directory']:
result.append('directory closed')
else:
result.append('file')
if value['short'] == key:
result.append('selected')
if value.get('diff'):
result.append('diff')
return ' '.join(result)
@register.function
def file_tree(files, selected):
depth = 0
output = ['<ul class="root">']
t = get_env().get_template('files/node.html')
for k, v in files.items():
if v['depth'] > depth:
output.append('<ul class="js-hidden">')
elif v['depth'] < depth:
output.extend(['</ul>' for x in range(v['depth'], depth)])
output.append(t.render({'value': v, 'selected': selected}))
depth = v['depth']
output.extend(['</ul>' for x in range(depth, -1, -1)])
return jinja2.Markup('\n'.join(output))
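# file_tree() assumes `files` is ordered so that every directory appears
# immediately before its contents (as produced by FileViewer.get_files below);
# the depth counter is what opens and closes the nested <ul> elements.
# A sketch of the shape it expects, for illustration only:
#   {'chrome': {'depth': 0, ...}, 'chrome/content.js': {'depth': 1, ...}}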
class FileViewer(object):
"""
Provide access to a storage-managed file by copying it locally and
extracting info from it. `src` is a storage-managed path and `dest` is a
local temp path.
"""
def __init__(self, file_obj):
self.file = file_obj
self.addon = self.file.version.addon
self.src = file_obj.current_file_path
self.dest = os.path.join(settings.TMP_PATH, 'file_viewer',
str(file_obj.pk))
self._files, self.selected = None, None
def __str__(self):
return str(self.file.id)
def _extraction_cache_key(self):
return ('%s:file-viewer:extraction-in-progress:%s' %
(settings.CACHE_PREFIX, self.file.id))
def extract(self):
"""
Will make all the directories and expand the files.
Raises error on nasty files.
"""
try:
os.makedirs(os.path.dirname(self.dest))
except OSError, err:
pass
if self.is_search_engine() and self.src.endswith('.xml'):
try:
os.makedirs(self.dest)
except OSError, err:
pass
copyfileobj(storage.open(self.src),
open(os.path.join(self.dest,
self.file.filename), 'w'))
else:
try:
extract_xpi(self.src, self.dest, expand=True)
except Exception, err:
task_log.error('Error (%s) extracting %s' % (err, self.src))
raise
def cleanup(self):
if os.path.exists(self.dest):
rm_local_tmp_dir(self.dest)
def is_search_engine(self):
"""Is our file for a search engine?"""
return self.file.version.addon.type == amo.ADDON_SEARCH
def is_extracted(self):
"""If the file has been extracted or not."""
return (os.path.exists(self.dest) and not
Message(self._extraction_cache_key()).get())
def _is_binary(self, mimetype, path):
"""Uses the filename to see if the file can be shown in HTML or not."""
# Re-use the blacklisted data from amo-validator to spot binaries.
ext = os.path.splitext(path)[1][1:]
if ext in blacklisted_extensions:
return True
if os.path.exists(path) and not os.path.isdir(path):
with storage.open(path, 'r') as rfile:
bytes = tuple(map(ord, rfile.read(4)))
if any(bytes[:len(x)] == x for x in blacklisted_magic_numbers):
return True
if mimetype:
major, minor = mimetype.split('/')
if major == 'image':
return 'image' # Mark that the file is binary, but an image.
return False
def read_file(self, allow_empty=False):
"""
Reads the file. Imposes a file limit and tries to cope with
UTF-8 and UTF-16 files appropriately. Return file contents and
a list of error messages.
"""
try:
file_data = self._read_file(allow_empty)
return file_data
except (IOError, OSError):
self.selected['msg'] = _('That file no longer exists.')
return ''
def _read_file(self, allow_empty=False):
if not self.selected and allow_empty:
return ''
assert self.selected, 'Please select a file'
if self.selected['size'] > settings.FILE_VIEWER_SIZE_LIMIT:
# L10n: {0} is the file size limit of the file viewer.
msg = _(u'File size is over the limit of {0}.').format(
filesizeformat(settings.FILE_VIEWER_SIZE_LIMIT))
self.selected['msg'] = msg
return ''
with storage.open(self.selected['full'], 'r') as opened:
cont = opened.read()
codec = 'utf-16' if cont.startswith(codecs.BOM_UTF16) else 'utf-8'
try:
return cont.decode(codec)
except UnicodeDecodeError:
cont = cont.decode(codec, 'ignore')
# L10n: {0} is the filename.
self.selected['msg'] = (
_('Problems decoding {0}.').format(codec))
return cont
def select(self, file_):
self.selected = self.get_files().get(file_)
def is_binary(self):
if self.selected:
binary = self.selected['binary']
if binary and (binary != 'image'):
self.selected['msg'] = _('This file is not viewable online. '
'Please download the file to view '
'the contents.')
return binary
def is_directory(self):
if self.selected:
if self.selected['directory']:
self.selected['msg'] = _('This file is a directory.')
return self.selected['directory']
def get_default(self, key=None):
"""Gets the default file and copes with search engines."""
if key:
return key
files = self.get_files()
for manifest in ('install.rdf', 'manifest.json', 'package.json'):
if manifest in files:
return manifest
return files.keys()[0] if files else None # Eg: it's a search engine.
def get_files(self):
"""
Returns a SortedDict, ordered by the filename of all the files in the
addon-file. Full of all the useful information you'll need to serve
this file, build templates etc.
"""
if self._files:
return self._files
if not self.is_extracted():
return {}
# In case a cron job comes along and deletes the files
# mid tree building.
try:
self._files = self._get_files(locale=get_language())
return self._files
except (OSError, IOError):
return {}
def truncate(self, filename, pre_length=15,
post_length=10, ellipsis=u'..'):
"""
Truncates a filename so that
somelongfilename.htm
becomes:
some...htm
as it truncates around the extension.
"""
root, ext = os.path.splitext(filename)
if len(root) > pre_length:
root = root[:pre_length] + ellipsis
if len(ext) > post_length:
ext = ext[:post_length] + ellipsis
return root + ext
def get_syntax(self, filename):
"""
Converts a filename into a syntax for the syntax highlighter, with
some modifications for specific common mozilla files.
The list of syntaxes is from:
http://alexgorbatchev.com/SyntaxHighlighter/manual/brushes/
"""
if filename:
short = os.path.splitext(filename)[1][1:]
syntax_map = {'xul': 'xml', 'rdf': 'xml', 'jsm': 'js',
'json': 'js'}
short = syntax_map.get(short, short)
if short in ['actionscript3', 'as3', 'bash', 'shell', 'cpp', 'c',
'c#', 'c-sharp', 'csharp', 'css', 'diff', 'html',
'java', 'javascript', 'js', 'jscript', 'patch',
'pas', 'php', 'plain', 'py', 'python', 'sass',
'scss', 'text', 'sql', 'vb', 'vbnet', 'xml', 'xhtml',
'xslt']:
return short
return 'plain'
@memoize(prefix='file-viewer', time=60 * 60)
def _get_files(self, locale=None):
"""We need the `locale` parameter for the memoization.
The `@memoize` decorator uses the prefix *and the parameters* to come
up with a memoize key. We thus add a (seemingly useless) `locale`
parameter.
Otherwise, we would just always have the urls for the files with the
locale from the first person checking them.
"""
all_files, res = [], SortedDict()
# Not using os.path.walk so we get just the right order.
def iterate(path):
path_dirs, path_files = storage.listdir(path)
for dirname in sorted(path_dirs):
full = os.path.join(path, dirname)
all_files.append(full)
iterate(full)
for filename in sorted(path_files):
full = os.path.join(path, filename)
all_files.append(full)
iterate(self.dest)
for path in all_files:
filename = smart_unicode(os.path.basename(path), errors='replace')
short = smart_unicode(path[len(self.dest) + 1:], errors='replace')
mime, encoding = mimetypes.guess_type(filename)
directory = os.path.isdir(path)
res[short] = {
'binary': self._is_binary(mime, path),
'depth': short.count(os.sep),
'directory': directory,
'filename': filename,
'full': path,
'md5': get_md5(path) if not directory else '',
'mimetype': mime or 'application/octet-stream',
'syntax': self.get_syntax(filename),
'modified': os.stat(path)[stat.ST_MTIME],
'short': short,
'size': os.stat(path)[stat.ST_SIZE],
'truncated': self.truncate(filename),
'url': reverse('files.list',
args=[self.file.id, 'file', short]),
'url_serve': reverse('files.redirect',
args=[self.file.id, short]),
'version': self.file.version.version,
}
return res
class DiffHelper(object):
def __init__(self, left, right):
self.left = FileViewer(left)
self.right = FileViewer(right)
self.addon = self.left.addon
self.key = None
def __str__(self):
return '%s:%s' % (self.left, self.right)
def extract(self):
self.left.extract(), self.right.extract()
def cleanup(self):
self.left.cleanup(), self.right.cleanup()
def is_extracted(self):
return self.left.is_extracted() and self.right.is_extracted()
def get_url(self, short):
return reverse('files.compare',
args=[self.left.file.id, self.right.file.id,
'file', short])
def get_files(self):
"""
Get the files from the left (primary) file and:
- remap any diffable ones to the compare url instead of the plain file url
- mark any files that differ so the templates can highlight them
"""
left_files = self.left.get_files()
right_files = self.right.get_files()
different = []
for key, file in left_files.items():
file['url'] = self.get_url(file['short'])
diff = file['md5'] != right_files.get(key, {}).get('md5')
file['diff'] = diff
if diff:
different.append(file)
# Now mark every directory above each different file as different.
for diff in different:
for depth in range(diff['depth']):
key = '/'.join(diff['short'].split('/')[:depth + 1])
if key in left_files:
left_files[key]['diff'] = True
return left_files
def get_deleted_files(self):
"""
Get files that exist in right, but not in left. These
are files that have been deleted between the two versions.
Every element will be marked as a diff.
"""
different = SortedDict()
if self.right.is_search_engine():
return different
def keep(path):
if path not in different:
copy = dict(right_files[path])
copy.update({'url': self.get_url(path), 'diff': True})
different[path] = copy
left_files = self.left.get_files()
right_files = self.right.get_files()
for key, file in right_files.items():
if key not in left_files:
# Make sure we have all the parent directories of
# deleted files.
dir = key
while os.path.dirname(dir):
dir = os.path.dirname(dir)
keep(dir)
keep(key)
return different
def read_file(self):
"""Reads both selected files."""
return [self.left.read_file(allow_empty=True),
self.right.read_file(allow_empty=True)]
def select(self, key):
"""
Select a file and adds the file object to self.one and self.two
for later fetching. Does special work for search engines.
"""
self.key = key
self.left.select(key)
if key and self.right.is_search_engine():
# There's only one file in a search engine.
key = self.right.get_default()
self.right.select(key)
return self.left.selected and self.right.selected
def is_binary(self):
"""Tells you if both selected files are binary."""
return (self.left.is_binary() or
self.right.is_binary())
def is_diffable(self):
"""Tells you if the selected files are diffable."""
if not self.left.selected and not self.right.selected:
return False
for obj in [self.left, self.right]:
if obj.is_binary():
return False
if obj.is_directory():
return False
return True
def copyfileobj(fsrc, fdst, length=64 * 1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while True:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def rmtree(prefix):
dirs, files = storage.listdir(prefix)
for fname in files:
storage.delete(os.path.join(prefix, fname))
for d in dirs:
rmtree(os.path.join(prefix, d))
storage.delete(prefix)
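# Rough usage sketch of the two helpers above (hypothetical file objects, shown
# for illustration only; the real call sites live elsewhere in the codebase):
#
#   viewer = FileViewer(file_obj)
#   if not viewer.is_extracted():
#       viewer.extract()
#   viewer.select(viewer.get_default())
#   content = viewer.read_file()
#
#   diff = DiffHelper(left_file, right_file)
#   diff.select('install.rdf')
#   if diff.is_diffable():
#       left_text, right_text = diff.read_file()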
| bsd-3-clause |
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/unidecode/x067.py | 252 | 4635 | data = (
'Zui ', # 0x00
'Can ', # 0x01
'Xu ', # 0x02
'Hui ', # 0x03
'Yin ', # 0x04
'Qie ', # 0x05
'Fen ', # 0x06
'Pi ', # 0x07
'Yue ', # 0x08
'You ', # 0x09
'Ruan ', # 0x0a
'Peng ', # 0x0b
'Ban ', # 0x0c
'Fu ', # 0x0d
'Ling ', # 0x0e
'Fei ', # 0x0f
'Qu ', # 0x10
'[?] ', # 0x11
'Nu ', # 0x12
'Tiao ', # 0x13
'Shuo ', # 0x14
'Zhen ', # 0x15
'Lang ', # 0x16
'Lang ', # 0x17
'Juan ', # 0x18
'Ming ', # 0x19
'Huang ', # 0x1a
'Wang ', # 0x1b
'Tun ', # 0x1c
'Zhao ', # 0x1d
'Ji ', # 0x1e
'Qi ', # 0x1f
'Ying ', # 0x20
'Zong ', # 0x21
'Wang ', # 0x22
'Tong ', # 0x23
'Lang ', # 0x24
'[?] ', # 0x25
'Meng ', # 0x26
'Long ', # 0x27
'Mu ', # 0x28
'Deng ', # 0x29
'Wei ', # 0x2a
'Mo ', # 0x2b
'Ben ', # 0x2c
'Zha ', # 0x2d
'Zhu ', # 0x2e
'Zhu ', # 0x2f
'[?] ', # 0x30
'Zhu ', # 0x31
'Ren ', # 0x32
'Ba ', # 0x33
'Po ', # 0x34
'Duo ', # 0x35
'Duo ', # 0x36
'Dao ', # 0x37
'Li ', # 0x38
'Qiu ', # 0x39
'Ji ', # 0x3a
'Jiu ', # 0x3b
'Bi ', # 0x3c
'Xiu ', # 0x3d
'Ting ', # 0x3e
'Ci ', # 0x3f
'Sha ', # 0x40
'Eburi ', # 0x41
'Za ', # 0x42
'Quan ', # 0x43
'Qian ', # 0x44
'Yu ', # 0x45
'Gan ', # 0x46
'Wu ', # 0x47
'Cha ', # 0x48
'Shan ', # 0x49
'Xun ', # 0x4a
'Fan ', # 0x4b
'Wu ', # 0x4c
'Zi ', # 0x4d
'Li ', # 0x4e
'Xing ', # 0x4f
'Cai ', # 0x50
'Cun ', # 0x51
'Ren ', # 0x52
'Shao ', # 0x53
'Tuo ', # 0x54
'Di ', # 0x55
'Zhang ', # 0x56
'Mang ', # 0x57
'Chi ', # 0x58
'Yi ', # 0x59
'Gu ', # 0x5a
'Gong ', # 0x5b
'Du ', # 0x5c
'Yi ', # 0x5d
'Qi ', # 0x5e
'Shu ', # 0x5f
'Gang ', # 0x60
'Tiao ', # 0x61
'Moku ', # 0x62
'Soma ', # 0x63
'Tochi ', # 0x64
'Lai ', # 0x65
'Sugi ', # 0x66
'Mang ', # 0x67
'Yang ', # 0x68
'Ma ', # 0x69
'Miao ', # 0x6a
'Si ', # 0x6b
'Yuan ', # 0x6c
'Hang ', # 0x6d
'Fei ', # 0x6e
'Bei ', # 0x6f
'Jie ', # 0x70
'Dong ', # 0x71
'Gao ', # 0x72
'Yao ', # 0x73
'Xian ', # 0x74
'Chu ', # 0x75
'Qun ', # 0x76
'Pa ', # 0x77
'Shu ', # 0x78
'Hua ', # 0x79
'Xin ', # 0x7a
'Chou ', # 0x7b
'Zhu ', # 0x7c
'Chou ', # 0x7d
'Song ', # 0x7e
'Ban ', # 0x7f
'Song ', # 0x80
'Ji ', # 0x81
'Yue ', # 0x82
'Jin ', # 0x83
'Gou ', # 0x84
'Ji ', # 0x85
'Mao ', # 0x86
'Pi ', # 0x87
'Bi ', # 0x88
'Wang ', # 0x89
'Ang ', # 0x8a
'Fang ', # 0x8b
'Fen ', # 0x8c
'Yi ', # 0x8d
'Fu ', # 0x8e
'Nan ', # 0x8f
'Xi ', # 0x90
'Hu ', # 0x91
'Ya ', # 0x92
'Dou ', # 0x93
'Xun ', # 0x94
'Zhen ', # 0x95
'Yao ', # 0x96
'Lin ', # 0x97
'Rui ', # 0x98
'E ', # 0x99
'Mei ', # 0x9a
'Zhao ', # 0x9b
'Guo ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Yun ', # 0x9f
'Waku ', # 0xa0
'Dou ', # 0xa1
'Shu ', # 0xa2
'Zao ', # 0xa3
'[?] ', # 0xa4
'Li ', # 0xa5
'Haze ', # 0xa6
'Jian ', # 0xa7
'Cheng ', # 0xa8
'Matsu ', # 0xa9
'Qiang ', # 0xaa
'Feng ', # 0xab
'Nan ', # 0xac
'Xiao ', # 0xad
'Xian ', # 0xae
'Ku ', # 0xaf
'Ping ', # 0xb0
'Yi ', # 0xb1
'Xi ', # 0xb2
'Zhi ', # 0xb3
'Guai ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Jia ', # 0xb7
'Gou ', # 0xb8
'Fu ', # 0xb9
'Mo ', # 0xba
'Yi ', # 0xbb
'Ye ', # 0xbc
'Ye ', # 0xbd
'Shi ', # 0xbe
'Nie ', # 0xbf
'Bi ', # 0xc0
'Duo ', # 0xc1
'Yi ', # 0xc2
'Ling ', # 0xc3
'Bing ', # 0xc4
'Ni ', # 0xc5
'La ', # 0xc6
'He ', # 0xc7
'Pan ', # 0xc8
'Fan ', # 0xc9
'Zhong ', # 0xca
'Dai ', # 0xcb
'Ci ', # 0xcc
'Yang ', # 0xcd
'Fu ', # 0xce
'Bo ', # 0xcf
'Mou ', # 0xd0
'Gan ', # 0xd1
'Qi ', # 0xd2
'Ran ', # 0xd3
'Rou ', # 0xd4
'Mao ', # 0xd5
'Zhao ', # 0xd6
'Song ', # 0xd7
'Zhe ', # 0xd8
'Xia ', # 0xd9
'You ', # 0xda
'Shen ', # 0xdb
'Ju ', # 0xdc
'Tuo ', # 0xdd
'Zuo ', # 0xde
'Nan ', # 0xdf
'Ning ', # 0xe0
'Yong ', # 0xe1
'Di ', # 0xe2
'Zhi ', # 0xe3
'Zha ', # 0xe4
'Cha ', # 0xe5
'Dan ', # 0xe6
'Gu ', # 0xe7
'Pu ', # 0xe8
'Jiu ', # 0xe9
'Ao ', # 0xea
'Fu ', # 0xeb
'Jian ', # 0xec
'Bo ', # 0xed
'Duo ', # 0xee
'Ke ', # 0xef
'Nai ', # 0xf0
'Zhu ', # 0xf1
'Bi ', # 0xf2
'Liu ', # 0xf3
'Chai ', # 0xf4
'Zha ', # 0xf5
'Si ', # 0xf6
'Zhu ', # 0xf7
'Pei ', # 0xf8
'Shi ', # 0xf9
'Guai ', # 0xfa
'Cha ', # 0xfb
'Yao ', # 0xfc
'Jue ', # 0xfd
'Jiu ', # 0xfe
'Shi ', # 0xff
)
| agpl-3.0 |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/core/magics/execution.py | 3 | 40995 | """Implementation of execution-related magic functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import __builtin__ as builtin_mod
import bdb
import os
import sys
import time
from StringIO import StringIO
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile, pstats
except ImportError:
profile = pstats = None
# Our own packages
from IPython.core import debugger, oinspect
from IPython.core import magic_arguments
from IPython.core import page
from IPython.core.error import UsageError
from IPython.core.macro import Macro
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
line_cell_magic, on_off, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import py3compat
from IPython.utils.io import capture_output
from IPython.utils.ipstruct import Struct
from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, unquote_filename
from IPython.utils.timing import clock, clock2
from IPython.utils.warn import warn, error
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class ExecutionMagics(Magics):
"""Magics related to code execution, debugging, profiling, etc.
"""
def __init__(self, shell):
super(ExecutionMagics, self).__init__(shell)
if profile is None:
self.prun = self.profile_missing_notice
# Default execution function used to actually run user code.
self.default_runner = None
def profile_missing_notice(self, *args, **kwargs):
error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
@skip_doctest
@line_cell_magic
def prun(self, parameter_s='', cell=None, user_mode=True,
opts=None,arg_lst=None,prog_ns=None):
"""Run a statement through the python code profiler.
Usage, in line mode:
%prun [options] statement
Usage, in cell mode:
%%prun [options] [statement]
code...
code...
In cell mode, the additional code lines are appended to the (possibly
empty) statement in the first line. Cell mode allows you to easily
profile multiline blocks without having to put them in a separate
function.
The given statement (which doesn't require quote marks) is run via the
python profiler in a manner similar to the profile.run() function.
Namespaces are internally managed to work correctly; profile.run
cannot be used in IPython because it makes certain assumptions about
namespaces which do not hold under IPython.
Options:
-l <limit>: you can place restrictions on what or how much of the
profile gets printed. The limit value can be:
* A string: only information for function names containing this string
is printed.
* An integer: only these many lines are printed.
* A float (between 0 and 1): this fraction of the report is printed
(for example, use a limit of 0.4 to see the topmost 40% only).
You can combine several limits with repeated use of the option. For
example, '-l __init__ -l 5' will print only the topmost 5 lines of
information about class constructors.
-r: return the pstats.Stats object generated by the profiling. This
object has all the information about the profile in it, and you can
later use it for further analysis or in other functions.
-s <key>: sort profile by given key. You can provide more than one key
by using the option several times: '-s key1 -s key2 -s key3...'. The
default sorting key is 'time'.
The following is copied verbatim from the profile documentation
referenced below:
When more than one key is provided, additional keys are used as
secondary criteria when there is equality in all keys selected
before them.
Abbreviations can be used for any key names, as long as the
abbreviation is unambiguous. The following are the keys currently
defined:
Valid Arg Meaning
"calls" call count
"cumulative" cumulative time
"file" file name
"module" file name
"pcalls" primitive call count
"line" line number
"name" function name
"nfl" name/file/line
"stdname" standard name
"time" internal time
Note that all sorts on statistics are in descending order (placing
most time-consuming items first), whereas name, file, and line number
searches are in ascending order (i.e., alphabetical). The subtle
distinction between "nfl" and "stdname" is that the standard name is a
sort of the name as printed, which means that the embedded line
numbers get compared in an odd way. For example, lines 3, 20, and 40
would (if the file names were the same) appear in the string order
"20" "3" and "40". In contrast, "nfl" does a numeric compare of the
line numbers. In fact, sort_stats("nfl") is the same as
sort_stats("name", "file", "line").
-T <filename>: save profile results as shown on screen to a text
file. The profile is still shown on screen.
-D <filename>: save (via dump_stats) profile statistics to given
filename. This data is in a format understood by the pstats module, and
is generated by a call to the dump_stats() method of profile
objects. The profile is still shown on screen.
-q: suppress output to the pager. Best used with -T and/or -D above.
If you want to run complete programs under the profiler's control, use
'%run -p [prof_opts] filename.py [args to program]' where prof_opts
contains profiler specific options as described here.
You can read the complete documentation for the profile module with::
In [1]: import profile; profile.help()
"""
opts_def = Struct(D=[''],l=[],s=['time'],T=[''])
if user_mode: # regular user call
opts,arg_str = self.parse_options(parameter_s,'D:l:rs:T:q',
list_all=True, posix=False)
namespace = self.shell.user_ns
if cell is not None:
arg_str += '\n' + cell
else: # called to run a program by %run -p
try:
filename = get_py_filename(arg_lst[0])
except IOError as e:
try:
msg = str(e)
except UnicodeError:
msg = e.message
error(msg)
return
arg_str = 'execfile(filename,prog_ns)'
namespace = {
'execfile': self.shell.safe_execfile,
'prog_ns': prog_ns,
'filename': filename
}
opts.merge(opts_def)
prof = profile.Profile()
try:
prof = prof.runctx(arg_str,namespace,namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
if hasattr(stats,'stream'):
# In newer versions of python, the stats object has a 'stream'
# attribute to write into.
stats.stream = stdout_trap
stats.print_stats(*lims)
else:
# For older versions, we manually redirect stdout during printing
sys_stdout = sys.stdout
try:
sys.stdout = stdout_trap
stats.print_stats(*lims)
finally:
sys.stdout = sys_stdout
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print sys_exit,
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
dump_file = unquote_filename(dump_file)
prof.dump_stats(dump_file)
print '\n*** Profile stats marshalled to file',\
`dump_file`+'.',sys_exit
if text_file:
text_file = unquote_filename(text_file)
pfile = open(text_file,'w')
pfile.write(output)
pfile.close()
print '\n*** Profile printout saved to text file',\
`text_file`+'.',sys_exit
if opts.has_key('r'):
return stats
else:
return None
@line_magic
def pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your configuration
file (the option is ``InteractiveShell.pdb``).
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print 'Automatic pdb calling has been turned',on_off(new_pdb)
@line_magic
def debug(self, parameter_s=''):
"""Activate the interactive debugger in post-mortem mode.
If an exception has just occurred, this lets you inspect its stack
frames interactively. Note that this will always work only on the last
traceback that occurred, so you must call this quickly after an
exception that you wish to inspect has fired, because if another one
occurs, it clobbers the previous one.
If you want IPython to automatically do this on every exception, see
the %pdb magic for more details.
"""
self.shell.debugger(force=True)
@line_magic
def tb(self, s):
"""Print the last traceback with the currently active exception mode.
See %xmode for changing exception reporting modes."""
self.shell.showtraceback()
@skip_doctest
@line_magic
def run(self, parameter_s='', runner=None,
file_finder=get_py_filename):
"""Run the named file inside IPython as a program.
Usage:\\
%run [-n -i -t [-N<N>] -d [-b<N>] -p [profile options]] file [args]
Parameters after the filename are passed as command-line arguments to
the program (put in sys.argv). Then, control returns to IPython's
prompt.
This is similar to running at a system prompt:\\
$ python file args\\
but with the advantage of giving you IPython's tracebacks, and of
loading all variables into your interactive namespace for further use
(unless -p is used, see below).
The file is executed in a namespace initially consisting only of
__name__=='__main__' and sys.argv constructed as indicated. It thus
sees its environment as if it were being run as a stand-alone program
(except for sharing global objects such as previously imported
modules). But after execution, the IPython interactive namespace gets
updated with all variables defined in the program (except for __name__
and sys.argv). This allows for very convenient loading of code for
interactive work, while giving each program a 'clean sheet' to run in.
Options:
-n: __name__ is NOT set to '__main__', but to the running file's name
without extension (as python does under import). This allows running
scripts and reloading the definitions in them without calling code
protected by an ' if __name__ == "__main__" ' clause.
-i: run the file in IPython's namespace instead of an empty one. This
is useful if you are experimenting with code written in a text editor
which depends on variables defined interactively.
-e: ignore sys.exit() calls or SystemExit exceptions in the script
being run. This is particularly useful if IPython is being used to
run unittests, which always exit with a sys.exit() call. In such
cases you are interested in the output of the test results, not in
seeing a traceback of the unittest module.
-t: print timing information at the end of the run. IPython will give
you an estimated CPU time consumption for your script, which under
Unix uses the resource module to avoid the wraparound problems of
time.clock(). Under Unix, an estimate of time spent on system tasks
is also given (for Windows platforms this is reported as 0.0).
If -t is given, an additional -N<N> option can be given, where <N>
must be an integer indicating how many times you want the script to
run. The final timing report will include total and per run results.
For example (testing the script uniq_stable.py)::
In [1]: run -t uniq_stable
IPython CPU timings (estimated):\\
User : 0.19597 s.\\
System: 0.0 s.\\
In [2]: run -t -N5 uniq_stable
IPython CPU timings (estimated):\\
Total runs performed: 5\\
Times : Total Per run\\
User : 0.910862 s, 0.1821724 s.\\
System: 0.0 s, 0.0 s.
-d: run your program under the control of pdb, the Python debugger.
This allows you to execute your program step by step, watch variables,
etc. Internally, what IPython does is similar to calling:
pdb.run('execfile("YOURFILENAME")')
with a breakpoint set on line 1 of your file. You can change the line
number for this automatic breakpoint to be <N> by using the -bN option
(where N must be an integer). For example::
%run -d -b40 myscript
will set the first breakpoint at line 40 in myscript.py. Note that
the first breakpoint must be set on a line which actually does
something (not a comment or docstring) for it to stop execution.
When the pdb debugger starts, you will see a (Pdb) prompt. You must
first enter 'c' (without quotes) to start execution up to the first
breakpoint.
Entering 'help' gives information about the use of the debugger. You
can easily see pdb's full documentation with "import pdb;pdb.help()"
at a prompt.
-p: run program under the control of the Python profiler module (which
prints a detailed report of execution times, function calls, etc).
You can pass other options after -p which affect the behavior of the
profiler itself. See the docs for %prun for details.
In this mode, the program's variables do NOT propagate back to the
IPython interactive namespace (because they remain in the namespace
where the profiler executes them).
Internally this triggers a call to %prun, see its documentation for
details on the options available specifically for profiling.
There is one special usage for which the text above doesn't apply:
if the filename ends with .ipy, the file is run as an IPython script,
just as if the commands had been typed at the IPython prompt.
-m: specify module name to load instead of script path. Similar to
the -m option for the python interpreter. Use this option last if you
want to combine with other %run options. Unlike the python interpreter
only source modules are allowed no .pyc or .pyo files.
For example::
%run -m example
will run the example module.
"""
# get arguments and set sys.argv for program to be run.
opts, arg_lst = self.parse_options(parameter_s, 'nidtN:b:pD:l:rs:T:em:',
mode='list', list_all=1)
if "m" in opts:
modulename = opts["m"][0]
modpath = find_mod(modulename)
if modpath is None:
warn('%r is not a valid modulename on sys.path'%modulename)
return
arg_lst = [modpath] + arg_lst
try:
filename = file_finder(arg_lst[0])
except IndexError:
warn('you must provide at least a filename.')
print '\n%run:\n', oinspect.getdoc(self.run)
return
except IOError as e:
try:
msg = str(e)
except UnicodeError:
msg = e.message
error(msg)
return
if filename.lower().endswith('.ipy'):
self.shell.safe_execfile_ipy(filename)
return
# Control the response to exit() calls made by the script being run
exit_ignore = 'e' in opts
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv # save it for later restoring
# simulate shell expansion on arguments, at least tilde expansion
args = [ os.path.expanduser(a) for a in arg_lst[1:] ]
sys.argv = [filename] + args # put in the proper filename
# protect sys.argv from potential unicode strings on Python 2:
if not py3compat.PY3:
sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
if 'i' in opts:
# Run in user's interactive namespace
prog_ns = self.shell.user_ns
__name__save = self.shell.user_ns['__name__']
prog_ns['__name__'] = '__main__'
main_mod = self.shell.new_main_mod(prog_ns)
else:
# Run in a fresh, empty namespace
if 'n' in opts:
name = os.path.splitext(os.path.basename(filename))[0]
else:
name = '__main__'
main_mod = self.shell.new_main_mod()
prog_ns = main_mod.__dict__
prog_ns['__name__'] = name
# Since '%run foo' emulates 'python foo.py' at the cmd line, we must
# set the __file__ global in the script's namespace
prog_ns['__file__'] = filename
# pickle fix. See interactiveshell for an explanation. But we need to
# make sure that, if we overwrite __main__, we replace it at the end
main_mod_name = prog_ns['__name__']
if main_mod_name == '__main__':
restore_main = sys.modules['__main__']
else:
restore_main = False
# This needs to be undone at the end to prevent holding references to
# every single object ever created.
sys.modules[main_mod_name] = main_mod
try:
stats = None
with self.shell.readline_no_record:
if 'p' in opts:
stats = self.prun('', None, False, opts, arg_lst, prog_ns)
else:
if 'd' in opts:
deb = debugger.Pdb(self.shell.colors)
# reset Breakpoint state, which is moronically kept
# in a class
bdb.Breakpoint.next = 1
bdb.Breakpoint.bplist = {}
bdb.Breakpoint.bpbynumber = [None]
# Set an initial breakpoint to stop execution
maxtries = 10
bp = int(opts.get('b', [1])[0])
checkline = deb.checkline(filename, bp)
if not checkline:
for bp in range(bp + 1, bp + maxtries + 1):
if deb.checkline(filename, bp):
break
else:
msg = ("\nI failed to find a valid line to set "
"a breakpoint\n"
"after trying up to line: %s.\n"
"Please set a valid breakpoint manually "
"with the -b option." % bp)
error(msg)
return
# if we find a good linenumber, set the breakpoint
deb.do_break('%s:%s' % (filename, bp))
# Start file run
print "NOTE: Enter 'c' at the",
print "%s prompt to start your script." % deb.prompt
ns = {'execfile': py3compat.execfile, 'prog_ns': prog_ns}
try:
deb.run('execfile("%s", prog_ns)' % filename, ns)
except:
etype, value, tb = sys.exc_info()
# Skip three frames in the traceback: the %run one,
# one inside bdb.py, and the command-line typed by the
# user (run by exec in pdb itself).
self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
else:
if runner is None:
runner = self.default_runner
if runner is None:
runner = self.shell.safe_execfile
if 't' in opts:
# timed execution
try:
nruns = int(opts['N'][0])
if nruns < 1:
error('Number of runs must be >=1')
return
except (KeyError):
nruns = 1
twall0 = time.time()
if nruns == 1:
t0 = clock2()
runner(filename, prog_ns, prog_ns,
exit_ignore=exit_ignore)
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print "\nIPython CPU timings (estimated):"
print " User : %10.2f s." % t_usr
print " System : %10.2f s." % t_sys
else:
runs = range(nruns)
t0 = clock2()
for nr in runs:
runner(filename, prog_ns, prog_ns,
exit_ignore=exit_ignore)
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print "\nIPython CPU timings (estimated):"
print "Total runs performed:", nruns
print " Times : %10s %10s" % ('Total', 'Per run')
print " User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns)
print " System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns)
twall1 = time.time()
print "Wall time: %10.2f s." % (twall1 - twall0)
else:
# regular execution
runner(filename, prog_ns, prog_ns, exit_ignore=exit_ignore)
if 'i' in opts:
self.shell.user_ns['__name__'] = __name__save
else:
# The shell MUST hold a reference to prog_ns so after %run
# exits, the python deletion mechanism doesn't zero it out
# (leaving dangling references).
self.shell.cache_main_mod(prog_ns, filename)
# update IPython interactive namespace
# Some forms of read errors on the file may mean the
# __name__ key was never set; using pop we don't have to
# worry about a possible KeyError.
prog_ns.pop('__name__', None)
self.shell.user_ns.update(prog_ns)
finally:
# It's a bit of a mystery why, but __builtins__ can change from
# being a module to becoming a dict missing some key data after
# %run. As best I can see, this is NOT something IPython is doing
# at all, and similar problems have been reported before:
# http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
# Since this seems to be done by the interpreter itself, the best
# we can do is to at least restore __builtins__ for the user on
# exit.
self.shell.user_ns['__builtins__'] = builtin_mod
# Ensure key global structures are restored
sys.argv = save_argv
if restore_main:
sys.modules['__main__'] = restore_main
else:
# Remove from sys.modules the reference to main_mod we'd
# added. Otherwise it will trap references to objects
# contained therein.
del sys.modules[main_mod_name]
return stats
@skip_doctest
@line_cell_magic
def timeit(self, line='', cell=None):
"""Time execution of a Python statement or expression
Usage, in line mode:
%timeit [-n<N> -r<R> [-t|-c]] statement
or in cell mode:
%%timeit [-n<N> -r<R> [-t|-c]] setup_code
code
code...
Time execution of a Python statement or expression using the timeit
module. This function can be used both as a line and cell magic:
- In line mode you can time a single-line statement (though multiple
ones can be chained with using semicolons).
- In cell mode, the statement in the first line is used as setup code
(executed but not timed) and the body of the cell is timed. The cell
body has access to any variables created in the setup code.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples
--------
::
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit."""
import timeit
import math
# XXX: Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals. Until we figure out a robust way of
# auto-detecting if the terminal can deal with it, use plain 'us' for
# microseconds. I am really NOT happy about disabling the proper
# 'micro' prefix, but crashing is worse... If anyone knows what the
# right solution for this is, I'm all ears...
#
# Note: using
#
# s = u'\xb5'
# s.encode(sys.getdefaultencoding())
#
# is not sufficient, as I've seen terminals where that fails but
# print s
#
# succeeds
#
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
#units = [u"s", u"ms",u'\xb5',"ns"]
units = [u"s", u"ms",u'us',"ns"]
scaling = [1, 1e3, 1e6, 1e9]
opts, stmt = self.parse_options(line,'n:r:tcp:',
posix=False, strict=False)
if stmt == "" and cell is None:
return
timefunc = timeit.default_timer
number = int(getattr(opts, "n", 0))
repeat = int(getattr(opts, "r", timeit.default_repeat))
precision = int(getattr(opts, "p", 3))
if hasattr(opts, "t"):
timefunc = time.time
if hasattr(opts, "c"):
timefunc = clock
timer = timeit.Timer(timer=timefunc)
# this code has tight coupling to the inner workings of timeit.Timer,
# but is there a better way to achieve that the code stmt has access
# to the shell namespace?
transform = self.shell.input_splitter.transform_cell
if cell is None:
# called as line magic
setup = 'pass'
stmt = timeit.reindent(transform(stmt), 8)
else:
setup = timeit.reindent(transform(stmt), 4)
stmt = timeit.reindent(transform(cell), 8)
# From Python 3.3, this template uses new-style string formatting.
if sys.version_info >= (3, 3):
src = timeit.template.format(stmt=stmt, setup=setup)
else:
src = timeit.template % dict(stmt=stmt, setup=setup)
# Track compilation time so it can be reported if too long
# Minimum time above which compilation time will be reported
tc_min = 0.1
t0 = clock()
code = compile(src, "<magic-timeit>", "exec")
tc = clock()-t0
ns = {}
exec code in self.shell.user_ns, ns
timer.inner = ns["inner"]
if number == 0:
# determine number so that 0.2 <= total time < 2.0
number = 1
for i in range(1, 10):
if timer.timeit(number) >= 0.2:
break
number *= 10
best = min(timer.repeat(repeat, number)) / number
if best > 0.0 and best < 1000.0:
order = min(-int(math.floor(math.log10(best)) // 3), 3)
elif best >= 1000.0:
order = 0
else:
order = 3
print u"%d loops, best of %d: %.*g %s per loop" % (number, repeat,
precision,
best * scaling[order],
units[order])
if tc > tc_min:
print "Compiler time: %.2f s" % tc
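# Worked example of the scaling above (illustrative numbers only): if one loop
# of the statement takes ~50 us, the calibration loop settles on number=10000,
# the first power of ten whose total run time reaches 0.2 s. With
# best = 5.3e-05 s, floor(log10(best)) is -5, so order = min(-(-5 // 3), 3) = 2
# and the result is reported as "53 us per loop" using scaling[2] = 1e6.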
@skip_doctest
@needs_local_scope
@line_magic
def time(self,parameter_s, user_locals):
"""Time execution of a Python statement or expression.
The CPU and wall clock times are printed, and the value of the
expression (if any) is returned. Note that under Win32, system time
is always reported as 0, since it can not be measured.
This function provides very basic timing functionality. In Python
2.3, the timeit module offers more control and sophistication, so this
could be rewritten to use it (patches welcome).
Examples
--------
::
In [1]: time 2**128
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Out[1]: 340282366920938463463374607431768211456L
In [2]: n = 1000000
In [3]: time sum(range(n))
CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
Wall time: 1.37
Out[3]: 499999500000L
In [4]: time print 'hello world'
hello world
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Note that the time needed by Python to compile the given expression
will be reported if it is more than 0.1s. In this example, the
actual exponentiation is done by Python at compilation time, so while
the expression can take a noticeable amount of time to compute, that
time is purely due to the compilation:
In [5]: time 3**9999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
In [6]: time 3**999999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
Compiler : 0.78 s
"""
# fail immediately if the given expression can't be compiled
expr = self.shell.prefilter(parameter_s,False)
# Minimum time above which compilation time will be reported
tc_min = 0.1
try:
mode = 'eval'
t0 = clock()
code = compile(expr,'<timed eval>',mode)
tc = clock()-t0
except SyntaxError:
mode = 'exec'
t0 = clock()
code = compile(expr,'<timed exec>',mode)
tc = clock()-t0
# skew measurement as little as possible
glob = self.shell.user_ns
wtime = time.time
# time execution
wall_st = wtime()
if mode=='eval':
st = clock2()
out = eval(code, glob, user_locals)
end = clock2()
else:
st = clock2()
exec code in glob, user_locals
end = clock2()
out = None
wall_end = wtime()
# Compute actual times and report
wall_time = wall_end-wall_st
cpu_user = end[0]-st[0]
cpu_sys = end[1]-st[1]
cpu_tot = cpu_user+cpu_sys
print "CPU times: user %.2f s, sys: %.2f s, total: %.2f s" % \
(cpu_user,cpu_sys,cpu_tot)
print "Wall time: %.2f s" % wall_time
if tc > tc_min:
print "Compiler : %.2f s" % tc
return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
"""Define a macro for future re-execution. It accepts ranges of history,
filenames or string objects.
Usage:\\
%macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed as the
command line is used instead.
This will define a global variable called `name` which is a string
made of joining the slices and lines you specify (n1,n2,... numbers
above) from your input history into a single string. This variable
acts like an automatic function which re-executes those lines as if
you had typed them. You just type 'name' at the prompt and the code
executes.
The syntax for indicating input ranges is described in %history.
Note: as a 'hidden' feature, you can also use traditional python slice
notation, where N:M means numbers N through M-1.
For example, if your history contains (%hist prints it)::
44: x=1
45: y=3
46: z=x+y
47: print x
48: a=5
49: print 'x',x,'y',y
you can create a macro with lines 44 through 47 (included) and line 49
called my_macro with::
In [55]: %macro my_macro 44-47 49
Now, typing `my_macro` (without quotes) will re-execute all this code
in one pass.
You don't need to give the line-numbers in order, and any given line
number can appear multiple times. You can assemble macros with any
lines from your input history in any order.
The macro is a simple object which holds its value in an attribute,
but IPython's display system checks for macros and executes them as
code instead of printing them when you type their name.
You can view a macro's contents by explicitly printing it with::
print macro_name
"""
opts,args = self.parse_options(parameter_s,'r',mode='list')
if not args: # List existing macros
return sorted(k for k,v in self.shell.user_ns.iteritems() if\
isinstance(v, Macro))
if len(args) == 1:
raise UsageError(
"%macro insufficient args; usage '%macro name n1-n2 n3-4...")
name, codefrom = args[0], " ".join(args[1:])
#print 'rng',ranges # dbg
try:
lines = self.shell.find_user_code(codefrom, 'r' in opts)
except (ValueError, TypeError) as e:
print e.args[0]
return
macro = Macro(lines)
self.shell.define_macro(name, macro)
print 'Macro `%s` created. To execute, type its name (without quotes).' % name
print '=== Macro contents: ==='
print macro,
@magic_arguments.magic_arguments()
@magic_arguments.argument('output', type=str, default='', nargs='?',
help="""The name of the variable in which to store output.
This is a utils.io.CapturedIO object with stdout/err attributes
for the text of the captured output.
CapturedOutput also has a show() method for displaying the output,
and __call__ as well, so you can use that to quickly display the
output.
If unspecified, captured output is discarded.
"""
)
@magic_arguments.argument('--no-stderr', action="store_true",
help="""Don't capture stderr."""
)
@magic_arguments.argument('--no-stdout', action="store_true",
help="""Don't capture stdout."""
)
@cell_magic
def capture(self, line, cell):
"""run the cell, capturing stdout/err"""
args = magic_arguments.parse_argstring(self.capture, line)
out = not args.no_stdout
err = not args.no_stderr
with capture_output(out, err) as io:
self.shell.run_cell(cell)
if args.output:
self.shell.user_ns[args.output] = io
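# Illustrative cell-magic usage (sketch only, not executed here):
#   %%capture captured
#   print("noisy output")
# Afterwards `captured.stdout` holds the text and `captured.show()` replays it,
# per the CapturedIO behaviour described in the argument help above.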
| bsd-3-clause |
tumbl3w33d/ansible | lib/ansible/modules/cloud/amazon/s3_website.py | 13 | 11787 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_website
short_description: Configure an s3 bucket as a website
description:
- Configure an s3 bucket as a website
version_added: "2.2"
requirements: [ boto3 ]
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket"
required: true
type: str
error_key:
description:
- "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
type: str
redirect_all_requests:
description:
- "Describes the redirect behavior for every request to this s3 bucket website endpoint"
type: str
state:
description:
- "Add or remove s3 website configuration"
choices: [ 'present', 'absent' ]
required: true
type: str
suffix:
description:
- >
Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
character.
default: index.html
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure an s3 bucket to redirect all requests to example.com
- s3_website:
name: mybucket.com
redirect_all_requests: example.com
state: present
# Remove website configuration from an s3 bucket
- s3_website:
name: mybucket.com
state: absent
# Configure an s3 bucket as a website with index and error pages
- s3_website:
name: mybucket.com
suffix: home.htm
error_key: errors/404.htm
state: present
'''
RETURN = '''
index_document:
description: index document
type: complex
returned: always
contains:
suffix:
description: suffix that is appended to a request that is for a directory on the website endpoint
returned: success
type: str
sample: index.html
error_document:
description: error document
type: complex
returned: always
contains:
key:
description: object key name to use when a 4XX class error occurs
returned: when error_document parameter set
type: str
sample: error.html
redirect_all_requests_to:
description: where to redirect requests
type: complex
returned: always
contains:
host_name:
description: name of the host where requests will be redirected.
returned: when redirect all requests parameter set
type: str
sample: ansible.com
protocol:
description: protocol to use when redirecting requests.
returned: when redirect all requests parameter set
type: str
sample: https
routing_rules:
description: routing rules
type: list
returned: always
contains:
condition:
type: complex
description: A container for describing a condition that must be met for the specified redirect to apply.
contains:
http_error_code_returned_equals:
description: The HTTP error code when the redirect is applied.
returned: always
type: str
key_prefix_equals:
description: object key name prefix when the redirect is applied. For example, to redirect
requests for ExamplePage.html, the key prefix will be ExamplePage.html
returned: when routing rule present
type: str
sample: docs/
redirect:
type: complex
description: Container for redirect information.
returned: always
contains:
host_name:
description: name of the host where requests will be redirected.
returned: when host name set as part of redirect rule
type: str
sample: ansible.com
http_redirect_code:
description: The HTTP redirect code to use on the response.
returned: when routing rule present
type: str
protocol:
description: Protocol to use when redirecting requests.
returned: when routing rule present
type: str
sample: http
replace_key_prefix_with:
description: object key prefix to use in the redirect request
returned: when routing rule present
type: str
sample: documents/
replace_key_with:
description: object key prefix to use in the redirect request
returned: when routing rule present
type: str
sample: documents/
'''
import time
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
def _create_redirect_dict(url):
redirect_dict = {}
url_split = url.split(':')
# Did we split anything?
if len(url_split) == 2:
redirect_dict[u'Protocol'] = url_split[0]
redirect_dict[u'HostName'] = url_split[1].replace('//', '')
elif len(url_split) == 1:
redirect_dict[u'HostName'] = url_split[0]
else:
raise ValueError('Redirect URL appears invalid')
return redirect_dict
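# For illustration, the parsing above yields e.g.:
#   _create_redirect_dict('https://example.com') -> {'Protocol': 'https', 'HostName': 'example.com'}
#   _create_redirect_dict('example.com') -> {'HostName': 'example.com'}
# and anything containing more than one ':' raises ValueError.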
def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}
if error_key is not None:
website_configuration['ErrorDocument'] = {'Key': error_key}
if suffix is not None:
website_configuration['IndexDocument'] = {'Suffix': suffix}
if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
return website_configuration
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
bucket_name = module.params.get("name")
redirect_all_requests = module.params.get("redirect_all_requests")
# If redirect_all_requests is set then don't use the default suffix that has been set
if redirect_all_requests is not None:
suffix = None
else:
suffix = module.params.get("suffix")
error_key = module.params.get("error_key")
changed = False
try:
bucket_website = resource_connection.BucketWebsite(bucket_name)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
try:
website_config = client_connection.get_bucket_website(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
website_config = None
else:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
if website_config is None:
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except ValueError as e:
module.fail_json(msg=str(e))
else:
try:
if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
(error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
(redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except KeyError as e:
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except ValueError as e:
module.fail_json(msg=str(e))
# Wait 5 secs before getting the website_config again to give it time to update
time.sleep(5)
website_config = client_connection.get_bucket_website(Bucket=bucket_name)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
def disable_bucket_as_website(client_connection, module):
changed = False
bucket_name = module.params.get("name")
try:
client_connection.get_bucket_website(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
module.exit_json(changed=changed)
else:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
try:
client_connection.delete_bucket_website(Bucket=bucket_name)
changed = True
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['present', 'absent']),
suffix=dict(type='str', required=False, default='index.html'),
error_key=dict(type='str', required=False),
redirect_all_requests=dict(type='str', required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['redirect_all_requests', 'suffix'],
['redirect_all_requests', 'error_key']
])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
if state == 'present':
enable_or_update_bucket_as_website(client_connection, resource_connection, module)
elif state == 'absent':
disable_bucket_as_website(client_connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
dalejung/trtools | trtools/core/topper.py | 1 | 4199 | import bottleneck as nb
import pandas as pd
import numpy as np
from trtools.monkey import patch
def bn_topn(arr, N, ascending=None):
"""
Return the top N results. Negative N will give the N lowest results.
Parameters
----------
arr : Series
one-dimensional array
N : int
number of elements to return. Negative numbers will return smallest
ascending : bool
Ordering of the return values. Default behavior is greatest absolute
magnitude.
Note
----
Default ascending order depends on N and whether you are looking for the
top or the bottom results. If you are looking for the top results, the
most positive results will come first. If you are looking for the bottom
results, then the most negative results come first.
"""
if arr.ndim > 1:
raise Exception("Only works on ndim=1")
if ascending is None:
ascending = not N > 0
arr = arr[~np.isnan(arr)]
if N > 0: # nlargest
N = min(abs(N), len(arr))
N = len(arr) - abs(N)
sl = slice(N, None)
else: # nsmallest
N = min(abs(N), len(arr))
sl = slice(None, N)
if N == 0:
bn_res = arr
else:
out = nb.partsort(arr, N)
bn_res = out[sl]
bn_res = np.sort(bn_res) # sort output
if not ascending:
bn_res = bn_res[::-1]
return bn_res
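# Doctest-style sketch of bn_topn (assumes a plain float ndarray; NaNs are dropped):
# >>> bn_topn(np.array([3.0, np.nan, -5.0, 1.0]), 2)
# array([ 3.,  1.])
# >>> bn_topn(np.array([3.0, np.nan, -5.0, 1.0]), -2)
# array([-5.,  1.])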
def bn_topargn(arr, N, ascending=None):
"""
Return the indices of the top N results.
The following should be equivalent
>>> res1 = arr[bn_topargn(arr, 10)]
>>> res2 = bn_topn(arr, 10)
>>> np.all(res1 == res2)
True
"""
if arr.ndim > 1:
raise Exception("Only works on ndim=1")
if ascending is None:
ascending = not N > 0
na_mask = np.isnan(arr)
has_na = na_mask.sum()
if has_na:
# store the old indices for translating back later
old_index_map = np.where(~na_mask)[0]
arr = arr[~na_mask]
if N > 0: # nlargest
N = len(arr) - abs(N)
sl = slice(N, None)
else: # nsmallest
N = abs(N)
sl = slice(None, N)
out = nb.argpartsort(arr, N)
index = out[sl]
# sort the index by their values
index_sort = np.argsort(arr[index])
if not ascending:
index_sort = index_sort[::-1]
index = index[index_sort]
# index is only correct with arr without nans.
# Map back to old_index if needed
if has_na:
index = old_index_map[index]
return index
topn = bn_topn
topargn = bn_topargn
@patch(pd.Series, 'topn')
def _topn_series(self, N, ascending=None):
return pd.Series(topn(self, N, ascending=ascending))
@patch(pd.Series, 'topargn')
def _topargn_series(self, N, ascending=None):
return pd.Series(topargn(self, N, ascending=ascending))
# bn.partsort works on a matrix, but I dunno how to handle nans in that case
# I suppose I could min/max and then set NaN to sentinel values?
@patch(pd.DataFrame, 'topn', override=True)
def topn_df(self, N, ascending=None, wrap=True):
vals = self.values
rows = vals.shape[0]
# don't let the return value have more columns than the dataframe has
cols = min(len(self.columns), abs(N))
ret = np.ndarray((rows, cols))
ret[:] = np.nan
for i in range(rows):
r = topn(vals[i], N=N, ascending=ascending)
ret[i][:len(r)] = r
if wrap:
return pd.DataFrame(ret, index=self.index)
return np.array(ret)
@patch(pd.DataFrame, 'topargn', override=True)
def topargn_df(self, N, ascending=None, wrap=True):
vals = self.values
rows = vals.shape[0]
ret = np.ndarray((rows, abs(N)), dtype=int)
for i in range(rows):
r = topargn(vals[i], N=N, ascending=ascending)
ret[i] = r
if wrap:
return pd.DataFrame(ret, index=self.index)
return np.array(ret)
@patch(pd.DataFrame, 'topindexn', override=True)
def topindexn_df(self, N, ascending=None):
"""
Pretty much topargn, except it returns column key instead of
positional int
"""
# get pos args
ret = topargn_df(self, N=N, ascending=ascending, wrap=False)
# grab column values
ret = self.columns[ret]
return pd.DataFrame(ret, index=self.index)
| mit |
qubesuser/qubes-core-admin | qubes/app.py | 1 | 42427 | #
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <[email protected]>
# Copyright (C) 2011-2015 Marek Marczykowski-Górecki
# <[email protected]>
# Copyright (C) 2014-2015 Wojtek Porczyk <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
import collections
import errno
import functools
import grp
import logging
import os
import random
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import itertools
import lxml.etree
import jinja2
import libvirt
try:
import xen.lowlevel.xs # pylint: disable=wrong-import-order
import xen.lowlevel.xc # pylint: disable=wrong-import-order
except ImportError:
pass
if os.name == 'posix':
# pylint: disable=wrong-import-order
import fcntl
elif os.name == 'nt':
# pylint: disable=import-error
import win32con
import win32file
import pywintypes
else:
raise RuntimeError("Qubes works only on POSIX or WinNT systems")
# pylint: disable=wrong-import-position
import qubes
import qubes.ext
import qubes.utils
import qubes.storage
import qubes.vm
import qubes.vm.adminvm
import qubes.vm.qubesvm
import qubes.vm.templatevm
# pylint: enable=wrong-import-position
class VirDomainWrapper(object):
# pylint: disable=too-few-public-methods
def __init__(self, connection, vm):
self._connection = connection
self._vm = vm
def _reconnect_if_dead(self):
is_dead = not self._vm.connect().isAlive()
if is_dead:
# pylint: disable=protected-access
self._connection._reconnect_if_dead()
self._vm = self._connection._conn.lookupByUUID(self._vm.UUID())
return is_dead
def __getattr__(self, attrname):
attr = getattr(self._vm, attrname)
if not isinstance(attr, collections.Callable):
return attr
@functools.wraps(attr)
def wrapper(*args, **kwargs):
try:
return attr(*args, **kwargs)
except libvirt.libvirtError:
if self._reconnect_if_dead():
return getattr(self._vm, attrname)(*args, **kwargs)
raise
return wrapper
class VirConnectWrapper(object):
# pylint: disable=too-few-public-methods
def __init__(self, uri):
self._conn = libvirt.open(uri)
def _reconnect_if_dead(self):
is_dead = not self._conn.isAlive()
if is_dead:
self._conn = libvirt.open(self._conn.getURI())
# TODO: re-register event handlers
return is_dead
def _wrap_domain(self, ret):
if isinstance(ret, libvirt.virDomain):
ret = VirDomainWrapper(self, ret)
return ret
def __getattr__(self, attrname):
attr = getattr(self._conn, attrname)
if not isinstance(attr, collections.Callable):
return attr
if attrname == 'close':
return attr
@functools.wraps(attr)
def wrapper(*args, **kwargs):
try:
return self._wrap_domain(attr(*args, **kwargs))
except libvirt.libvirtError:
if self._reconnect_if_dead():
return self._wrap_domain(
getattr(self._conn, attrname)(*args, **kwargs))
raise
return wrapper
class VMMConnection(object):
'''Connection to Virtual Machine Manager (libvirt)'''
def __init__(self, offline_mode=None):
'''
:param offline_mode: enable/disable offline mode; default is to
enable when running in chroot as root, otherwise disable
'''
if offline_mode is None:
offline_mode = bool(os.getuid() == 0 and
os.stat('/') != os.stat('/proc/1/root/.'))
self._offline_mode = offline_mode
self._libvirt_conn = None
self._xs = None
self._xc = None
@property
def offline_mode(self):
'''Check or enable offline mode (do not actually connect to vmm)'''
return self._offline_mode
def _libvirt_error_handler(self, ctx, error):
pass
def init_vmm_connection(self):
'''Initialise connection
This method is called automatically when a connection property is first accessed.'''
if self._libvirt_conn is not None:
# Already initialized
return
if self._offline_mode:
# Do not initialize in offline mode
raise qubes.exc.QubesException(
'VMM operations disabled in offline mode')
if 'xen.lowlevel.xs' in sys.modules:
self._xs = xen.lowlevel.xs.xs()
if 'xen.lowlevel.xc' in sys.modules:
self._xc = xen.lowlevel.xc.xc()
self._libvirt_conn = VirConnectWrapper(
qubes.config.defaults['libvirt_uri'])
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
@property
def libvirt_conn(self):
'''Connection to libvirt'''
self.init_vmm_connection()
return self._libvirt_conn
@property
def xs(self):
'''Connection to Xen Store
This property is available only when running on Xen.
'''
# XXX what about the case when we run under KVM,
# but xen modules are importable?
if 'xen.lowlevel.xs' not in sys.modules:
raise AttributeError(
'xs object is available under Xen hypervisor only')
self.init_vmm_connection()
return self._xs
@property
def xc(self):
'''Connection to Xen
This property is available only when running on Xen.
'''
# XXX what about the case when we run under KVM,
# but xen modules are importable?
if 'xen.lowlevel.xc' not in sys.modules:
raise AttributeError(
'xc object is available under Xen hypervisor only')
self.init_vmm_connection()
return self._xc
def close(self):
libvirt.registerErrorHandler(None, None)
if self._xs:
self._xs.close()
self._xs = None
if self._libvirt_conn:
self._libvirt_conn.close()
self._libvirt_conn = None
self._xc = None # and pray it will get garbage-collected
class QubesHost(object):
'''Basic information about host machine
:param qubes.Qubes app: Qubes application context (must have \
:py:attr:`Qubes.vmm` attribute defined)
'''
def __init__(self, app):
self.app = app
self._no_cpus = None
self._total_mem = None
self._physinfo = None
def _fetch(self):
if self._no_cpus is not None:
return
# pylint: disable=unused-variable
(model, memory, cpus, mhz, nodes, socket, cores, threads) = \
self.app.vmm.libvirt_conn.getInfo()
self._total_mem = int(memory) * 1024
self._no_cpus = cpus
self.app.log.debug('QubesHost: no_cpus={} memory_total={}'.format(
self.no_cpus, self.memory_total))
try:
self.app.log.debug('QubesHost: xen_free_memory={}'.format(
self.get_free_xen_memory()))
except NotImplementedError:
pass
@property
def memory_total(self):
'''Total memory, in kbytes'''
if self.app.vmm.offline_mode:
return 2**64-1
self._fetch()
return self._total_mem
@property
def no_cpus(self):
'''Number of CPUs'''
if self.app.vmm.offline_mode:
return 42
self._fetch()
return self._no_cpus
def get_free_xen_memory(self):
'''Get free memory from Xen's physinfo.
:raises NotImplementedError: when not under Xen
'''
try:
self._physinfo = self.app.xc.physinfo()
except AttributeError:
raise NotImplementedError('This function requires Xen hypervisor')
return int(self._physinfo['free_memory'])
def get_vm_stats(self, previous_time=None, previous=None, only_vm=None):
'''Measure cpu usage for all domains at once.
If previous measurements are given, CPU usage will be given as a
percentage of time. Otherwise only an absolute value (seconds) is returned.
Return a tuple of (measurements_time, measurements),
where measurements is a dictionary with key: domid, value: dict:
- cpu_time - absolute CPU usage (seconds since its startup)
- cpu_usage - CPU usage in %
- memory_kb - current memory assigned, in kb
This function requires Xen hypervisor.
.. warning::
This function may return info about implementation-specific VMs,
like stubdomains for HVM
:param previous: previous measurement
:param previous_time: time of previous measurement
:param only_vm: get measurements only for this VM
:raises NotImplementedError: when not under Xen
'''
if (previous_time is None) != (previous is None):
raise ValueError(
'previous and previous_time must be given together (or none)')
if previous is None:
previous = {}
current_time = time.time()
current = {}
try:
if only_vm:
xid = only_vm.xid
if xid < 0:
raise qubes.exc.QubesVMNotRunningError(only_vm)
info = self.app.vmm.xc.domain_getinfo(xid, 1)
if info[0]['domid'] != xid:
raise qubes.exc.QubesVMNotRunningError(only_vm)
else:
info = self.app.vmm.xc.domain_getinfo(0, 1024)
except AttributeError:
raise NotImplementedError(
'This function requires Xen hypervisor')
# TODO: add stubdomain stats to actual VMs
for vm in info:
domid = vm['domid']
current[domid] = {}
current[domid]['memory_kb'] = vm['mem_kb']
current[domid]['cpu_time'] = int(
vm['cpu_time'] / max(vm['online_vcpus'], 1))
if domid in previous:
current[domid]['cpu_usage'] = int(
(current[domid]['cpu_time'] - previous[domid]['cpu_time'])
/ 1000 ** 3 * 100 / (current_time - previous_time))
if current[domid]['cpu_usage'] < 0:
# VM has been rebooted
current[domid]['cpu_usage'] = 0
else:
current[domid]['cpu_usage'] = 0
return (current_time, current)
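# Illustrative shape of the get_vm_stats() return value (numbers are made up):
# (1500000000.0, {0: {'memory_kb': 4096000, 'cpu_time': 1234, 'cpu_usage': 3}})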
class VMCollection(object):
'''A collection of Qubes VMs
VMCollection supports ``in`` operator. You may test for ``qid``, ``name``
and whole VM object's presence.
Iterating over VMCollection will yield machine objects.
'''
def __init__(self, app):
self.app = app
self._dict = dict()
def close(self):
del self.app
self._dict.clear()
del self._dict
def __repr__(self):
return '<{} {!r}>'.format(
self.__class__.__name__, list(sorted(self.keys())))
def items(self):
'''Iterate over ``(qid, vm)`` pairs'''
for qid in self.qids():
yield (qid, self[qid])
def qids(self):
'''Iterate over all qids
qids are sorted by numerical order.
'''
return iter(sorted(self._dict.keys()))
keys = qids
def names(self):
'''Iterate over all names
names are sorted by lexical order.
'''
return iter(sorted(vm.name for vm in self._dict.values()))
def vms(self):
'''Iterate over all machines
vms are sorted by qid.
'''
return iter(sorted(self._dict.values()))
__iter__ = vms
values = vms
def add(self, value, _enable_events=True):
'''Add VM to collection
:param qubes.vm.BaseVM value: VM to add
:raises TypeError: when value is of wrong type
:raises ValueError: when there is already VM which has equal ``qid``
'''
# this violates duck typing, but is needed
# for VMProperty to function correctly
if not isinstance(value, qubes.vm.BaseVM):
raise TypeError('{} holds only BaseVM instances'.format(
self.__class__.__name__))
if value.qid in self:
raise ValueError('This collection already holds VM that has '
'qid={!r} ({!r})'.format(value.qid, self[value.qid]))
if value.name in self:
raise ValueError('A VM named {!s} already exists'
.format(value.name))
self._dict[value.qid] = value
if _enable_events:
value.events_enabled = True
self.app.fire_event('domain-add', vm=value)
return value
def __getitem__(self, key):
if isinstance(key, int):
return self._dict[key]
if isinstance(key, str):
for vm in self:
if vm.name == key:
return vm
raise KeyError(key)
if isinstance(key, qubes.vm.BaseVM):
key = key.uuid
if isinstance(key, uuid.UUID):
for vm in self:
if vm.uuid == key:
return vm
raise KeyError(key)
raise KeyError(key)
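# Illustrative lookups (assuming a loaded qubes.xml): app.domains[0],
# app.domains['dom0'] and app.domains[admin_vm.uuid] all resolve to the same
# AdminVM object; an unknown qid, name or uuid raises KeyError.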
def __delitem__(self, key):
vm = self[key]
if not vm.is_halted():
raise qubes.exc.QubesVMNotHaltedError(vm)
self.app.fire_event('domain-pre-delete', pre_event=True, vm=vm)
try:
vm.libvirt_domain.undefine()
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
# already undefined
pass
del self._dict[vm.qid]
self.app.fire_event('domain-delete', vm=vm)
def __contains__(self, key):
return any((key == vm or key == vm.qid or key == vm.name)
for vm in self)
def __len__(self):
return len(self._dict)
def get_vms_based_on(self, template):
template = self[template]
return set(vm for vm in self
if hasattr(vm, 'template') and vm.template == template)
def get_vms_connected_to(self, netvm):
new_vms = set([self[netvm]])
dependent_vms = set()
# Dependency resolving only makes sense on NetVM (or derivative)
# if not self[netvm_qid].is_netvm():
# return set([])
while new_vms:
cur_vm = new_vms.pop()
for vm in cur_vm.connected_vms:
if vm in dependent_vms:
continue
dependent_vms.add(vm)
# if vm.is_netvm():
new_vms.add(vm)
return dependent_vms
# XXX with Qubes Admin Api this will probably lead to race condition
# whole process of creating and adding should be synchronised
def get_new_unused_qid(self):
used_ids = set(self.qids())
for i in range(1, qubes.config.max_qid):
if i not in used_ids:
return i
raise LookupError("Cannot find unused qid!")
def get_new_unused_netid(self):
used_ids = set([vm.netid for vm in self]) # if vm.is_netvm()])
for i in range(1, qubes.config.max_netid):
if i not in used_ids:
return i
raise LookupError("Cannot find unused netid!")
def get_new_unused_dispid(self):
for _ in range(int(qubes.config.max_dispid ** 0.5)):
dispid = random.SystemRandom().randrange(qubes.config.max_dispid)
if not any(getattr(vm, 'dispid', None) == dispid for vm in self):
return dispid
raise LookupError((
'https://xkcd.com/221/',
'http://dilbert.com/strip/2001-10-25')[random.randint(0, 1)])
def _default_pool(app):
''' Default storage pool.
1. If there is one named 'default', use it.
2. Check if root fs is on LVM thin - use that
3. Look for file-based pool pointing /var/lib/qubes
4. Fail
'''
if 'default' in app.pools:
return app.pools['default']
else:
rootfs = os.stat('/')
root_major = (rootfs.st_dev & 0xff00) >> 8
root_minor = rootfs.st_dev & 0xff
for pool in app.pools.values():
if pool.config.get('driver', None) != 'lvm_thin':
continue
thin_pool = pool.config['thin_pool']
thin_volumes = subprocess.check_output(
['lvs', '--select', 'pool_lv=' + thin_pool,
'-o', 'lv_kernel_major,lv_kernel_minor', '--noheadings'])
thin_volumes = thin_volumes.decode()
if any([str(root_major), str(root_minor)] == thin_vol.split()
for thin_vol in thin_volumes.splitlines()):
return pool
# not a thin volume? look for file pools
for pool in app.pools.values():
if pool.config.get('driver', None) != 'file':
continue
if pool.config['dir_path'] == qubes.config.qubes_base_dir:
return pool
raise AttributeError('Cannot determine default storage pool')
def _setter_pool(app, prop, value):
if isinstance(value, qubes.storage.Pool):
return value
try:
return app.pools[value]
except KeyError:
raise qubes.exc.QubesPropertyValueError(app, prop, value,
'No such storage pool')
class Qubes(qubes.PropertyHolder):
'''Main Qubes application
:param str store: path to ``qubes.xml``
The store is loaded in stages:
1. In the first stage there are loaded some basic features from store
(currently labels).
2. In the second stage stubs for all VMs are loaded. They are filled
with their basic properties, like ``qid`` and ``name``.
3. In the third stage all global properties are loaded. They often
reference VMs, like default netvm, so they should be filled after
loading VMs.
4. In the fourth stage all remaining VM properties are loaded. They
also need all VMs loaded, because they represent dependencies
between VMs like aforementioned netvm.
5. In the fifth stage there are some fixups to ensure sane system
operation.
This class emits following events:
.. event:: domain-add (subject, event, vm)
When domain is added.
:param subject: Event emitter
:param event: Event name (``'domain-add'``)
:param vm: Domain object
.. event:: domain-pre-delete (subject, event, vm)
When domain is deleted. VM still has reference to ``app`` object,
and is contained within VMCollection. You may prevent removal by
raising an exception.
:param subject: Event emitter
:param event: Event name (``'domain-pre-delete'``)
:param vm: Domain object
.. event:: domain-delete (subject, event, vm)
When domain is deleted. VM still has reference to ``app`` object,
but is not contained within VMCollection.
:param subject: Event emitter
:param event: Event name (``'domain-delete'``)
:param vm: Domain object
Methods and attributes:
'''
default_netvm = qubes.VMProperty('default_netvm', load_stage=3,
default=None, allow_none=True,
doc='''Default NetVM for AppVMs. Initial state is `None`, which means
that AppVMs are not connected to the Internet.''')
default_fw_netvm = qubes.VMProperty('default_fw_netvm', load_stage=3,
default=None, allow_none=True,
doc='''Default NetVM for ProxyVMs. Initial state is `None`, which means
that ProxyVMs (including FirewallVM) are not connected to the
Internet.''')
default_template = qubes.VMProperty('default_template', load_stage=3,
vmclass=qubes.vm.templatevm.TemplateVM,
doc='Default template for new AppVMs')
updatevm = qubes.VMProperty('updatevm', load_stage=3,
allow_none=True,
doc='''Which VM to use as `yum` proxy for updating AdminVM and
TemplateVMs''')
clockvm = qubes.VMProperty('clockvm', load_stage=3,
default=None, allow_none=True,
doc='Which VM to use as NTP proxy for updating AdminVM')
default_kernel = qubes.property('default_kernel', load_stage=3,
doc='Which kernel to use when not overridden in VM')
default_dispvm = qubes.VMProperty('default_dispvm', load_stage=3,
doc='Default DispVM base for service calls', allow_none=True)
default_pool = qubes.property('default_pool', load_stage=3,
default=_default_pool,
setter=_setter_pool,
doc='Default storage pool')
default_pool_private = qubes.property('default_pool_private', load_stage=3,
default=lambda app: app.default_pool,
setter=_setter_pool,
doc='Default storage pool for private volumes')
default_pool_root = qubes.property('default_pool_root', load_stage=3,
default=lambda app: app.default_pool,
setter=_setter_pool,
doc='Default storage pool for root volumes')
default_pool_volatile = qubes.property('default_pool_volatile',
load_stage=3,
default=lambda app: app.default_pool,
setter=_setter_pool,
doc='Default storage pool for volatile volumes')
default_pool_kernel = qubes.property('default_pool_kernel', load_stage=3,
default=lambda app: app.default_pool,
setter=_setter_pool,
doc='Default storage pool for kernel volumes')
stats_interval = qubes.property('stats_interval',
default=3,
type=int,
doc='Interval in seconds for VM stats reporting (memory, CPU usage)')
# TODO #1637 #892
check_updates_vm = qubes.property('check_updates_vm',
type=bool, setter=qubes.property.bool,
default=True,
doc='check for updates inside qubes')
def __init__(self, store=None, load=True, offline_mode=None, lock=False,
**kwargs):
#: logger instance for logging global messages
self.log = logging.getLogger('app')
self.log.debug('init() -> %#x', id(self))
self.log.debug('stack:')
for frame in traceback.extract_stack():
self.log.debug('%s', frame)
self._extensions = qubes.ext.get_extensions()
#: collection of all VMs managed by this Qubes instance
self.domains = VMCollection(self)
#: collection of all available labels for VMs
self.labels = {}
#: collection of all pools
self.pools = {}
#: Connection to VMM
self.vmm = VMMConnection(offline_mode=offline_mode)
#: Information about host system
self.host = QubesHost(self)
if store is not None:
self._store = store
else:
self._store = os.environ.get('QUBES_XML_PATH',
os.path.join(
qubes.config.qubes_base_dir,
qubes.config.system_path['qubes_store_filename']))
super(Qubes, self).__init__(xml=None, **kwargs)
self.__load_timestamp = None
self.__locked_fh = None
self._domain_event_callback_id = None
#: jinja2 environment for libvirt XML templates
self.env = jinja2.Environment(
loader=jinja2.FileSystemLoader([
'/etc/qubes/templates',
'/usr/share/qubes/templates',
]),
undefined=jinja2.StrictUndefined)
if load:
self.load(lock=lock)
self.events_enabled = True
@property
def store(self):
return self._store
def load(self, lock=False):
'''Open qubes.xml
:throws EnvironmentError: failure on parsing store
:throws xml.parsers.expat.ExpatError: failure on parsing store
:raises lxml.etree.XMLSyntaxError: on syntax error in qubes.xml
'''
fh = self._acquire_lock()
self.xml = lxml.etree.parse(fh)
# stage 1: load labels and pools
for node in self.xml.xpath('./labels/label'):
label = qubes.Label.fromxml(node)
self.labels[label.index] = label
for node in self.xml.xpath('./pools/pool'):
name = node.get('name')
assert name, "Pool name '%s' is invalid " % name
try:
self.pools[name] = self._get_pool(**node.attrib)
except qubes.exc.QubesException as e:
self.log.error(str(e))
# stage 2: load VMs
for node in self.xml.xpath('./domains/domain'):
# pylint: disable=no-member
cls = self.get_vm_class(node.get('class'))
vm = cls(self, node)
vm.load_properties(load_stage=2)
vm.init_log()
self.domains.add(vm, _enable_events=False)
if 0 not in self.domains:
self.domains.add(
qubes.vm.adminvm.AdminVM(self, None, qid=0, name='dom0'),
_enable_events=False)
# stage 3: load global properties
self.load_properties(load_stage=3)
# stage 4: fill all remaining VM properties
for vm in self.domains:
vm.load_properties(load_stage=4)
vm.load_extras()
# stage 5: misc fixups
self.property_require('default_fw_netvm', allow_none=True)
self.property_require('default_netvm', allow_none=True)
self.property_require('default_template')
self.property_require('clockvm', allow_none=True)
self.property_require('updatevm', allow_none=True)
for vm in self.domains:
vm.events_enabled = True
vm.fire_event('domain-load')
# get a file timestamp (before closing it - still holding the lock!),
# to detect whether anyone else have modified it in the meantime
self.__load_timestamp = os.path.getmtime(self._store)
if not lock:
self._release_lock()
def __xml__(self):
element = lxml.etree.Element('qubes')
element.append(self.xml_labels())
pools_xml = lxml.etree.Element('pools')
for pool in self.pools.values():
xml = pool.__xml__()
if xml is not None:
pools_xml.append(xml)
element.append(pools_xml)
element.append(self.xml_properties())
domains = lxml.etree.Element('domains')
for vm in self.domains:
domains.append(vm.__xml__())
element.append(domains)
return element
def __str__(self):
return type(self).__name__
def save(self, lock=True):
'''Save all data to qubes.xml
There are several problems with saving :file:`qubes.xml` which must be
mitigated:
- Running out of disk space. No space left should not result in empty
file. This is done by writing to temporary file and then renaming.
- Two or more processes attempting to write the file concurrently. This is
prevented by file locking.
:param bool lock: keep file locked after saving
:throws EnvironmentError: failure on saving
'''
if not self.__locked_fh:
self._acquire_lock(for_save=True)
fh_new = tempfile.NamedTemporaryFile(
prefix=self._store, delete=False)
lxml.etree.ElementTree(self.__xml__()).write(
fh_new, encoding='utf-8', pretty_print=True)
fh_new.flush()
try:
os.chown(fh_new.name, -1, grp.getgrnam('qubes').gr_gid)
os.chmod(fh_new.name, 0o660)
except KeyError: # group 'qubes' not found
# don't change mode if no 'qubes' group in the system
pass
os.rename(fh_new.name, self._store)
# update stored mtime, in case of multiple save() calls without
# loading qubes.xml again
self.__load_timestamp = os.path.getmtime(self._store)
# this releases lock for all other processes,
# but they should instantly block on the new descriptor
self.__locked_fh.close()
self.__locked_fh = fh_new
if not lock:
self._release_lock()
def close(self):
'''Deconstruct the object and break circular references
After calling this the object is unusable, not even for saving.'''
self.log.debug('close() <- %#x', id(self))
for frame in traceback.extract_stack():
self.log.debug('%s', frame)
super().close()
if self._domain_event_callback_id is not None:
self.vmm.libvirt_conn.domainEventDeregisterAny(
self._domain_event_callback_id)
self._domain_event_callback_id = None
# Only our Lord, The God Almighty, knows what references
# are kept in extensions.
del self._extensions
for vm in self.domains:
vm.close()
self.domains.close()
del self.domains
self.vmm.close()
del self.vmm
del self.host
if self.__locked_fh:
self._release_lock()
def _acquire_lock(self, for_save=False):
assert self.__locked_fh is None, 'double lock'
while True:
try:
fd = os.open(self._store,
os.O_RDWR | (os.O_CREAT * int(for_save)))
except OSError as e:
if not for_save and e.errno == errno.ENOENT:
raise qubes.exc.QubesException(
'Qubes XML store {!r} is missing; '
'use qubes-create tool'.format(self._store))
raise
# While we were waiting for lock, someone could have unlink()ed
# (or rename()d) our file out of the filesystem. We have to
# ensure we got lock on something linked to filesystem.
# If not, try again.
if os.fstat(fd) != os.stat(self._store):
os.close(fd)
continue
if self.__load_timestamp and \
os.path.getmtime(self._store) != self.__load_timestamp:
os.close(fd)
raise qubes.exc.QubesException(
'Someone else modified qubes.xml in the meantime')
break
if os.name == 'posix':
fcntl.lockf(fd, fcntl.LOCK_EX)
elif os.name == 'nt':
# pylint: disable=protected-access
overlapped = pywintypes.OVERLAPPED()
win32file.LockFileEx(
win32file._get_osfhandle(fd),
win32con.LOCKFILE_EXCLUSIVE_LOCK, 0, -0x10000, overlapped)
self.__locked_fh = os.fdopen(fd, 'r+b')
return self.__locked_fh
def _release_lock(self):
assert self.__locked_fh is not None, 'double release'
# intentionally do not call explicit unlock to not unlock the file
# before all buffers are flushed
self.__locked_fh.close()
self.__locked_fh = None
def load_initial_values(self):
self.labels = {
1: qubes.Label(1, '0xcc0000', 'red'),
2: qubes.Label(2, '0xf57900', 'orange'),
3: qubes.Label(3, '0xedd400', 'yellow'),
4: qubes.Label(4, '0x73d216', 'green'),
5: qubes.Label(5, '0x555753', 'gray'),
6: qubes.Label(6, '0x3465a4', 'blue'),
7: qubes.Label(7, '0x75507b', 'purple'),
8: qubes.Label(8, '0x000000', 'black'),
}
assert max(self.labels.keys()) == qubes.config.max_default_label
# check if the default LVM Thin pool qubes_dom0/pool00 exists
if os.path.exists('/dev/mapper/qubes_dom0-pool00-tpool'):
self.add_pool(volume_group='qubes_dom0', thin_pool='pool00',
name='lvm', driver='lvm_thin')
# pool based on /var/lib/qubes will be created here:
for name, config in qubes.config.defaults['pool_configs'].items():
self.pools[name] = self._get_pool(**config)
self.default_pool_kernel = 'linux-kernel'
self.domains.add(
qubes.vm.adminvm.AdminVM(self, None, label='black'))
@classmethod
def create_empty_store(cls, *args, **kwargs):
self = cls(*args, load=False, **kwargs)
if os.path.exists(self.store):
raise qubes.exc.QubesException(
'{} already exists, aborting'.format(self.store))
self.load_initial_values()
# TODO py3 get lock= as keyword-only arg
self.save(kwargs.get('lock'))
return self
def xml_labels(self):
'''Serialise labels
:rtype: lxml.etree._Element
'''
labels = lxml.etree.Element('labels')
for label in sorted(self.labels.values(), key=lambda labl: labl.index):
labels.append(label.__xml__())
return labels
@staticmethod
def get_vm_class(clsname):
'''Find the class for a domain.
Classes are registered as setuptools' entry points in ``qubes.vm``
group. Any package may supply their own classes.
:param str clsname: name of the class
:return type: class
'''
try:
return qubes.utils.get_entry_point_one(
qubes.vm.VM_ENTRY_POINT, clsname)
except KeyError:
raise qubes.exc.QubesException(
'no such VM class: {!r}'.format(clsname))
# don't catch TypeError
def add_new_vm(self, cls, qid=None, **kwargs):
'''Add new Virtual Machine to collection
'''
if qid is None:
qid = self.domains.get_new_unused_qid()
if isinstance(cls, str):
cls = self.get_vm_class(cls)
# handle default template; specifically allow template=None (do not
# override it with default template)
if 'template' not in kwargs and hasattr(cls, 'template'):
kwargs['template'] = self.default_template
elif 'template' in kwargs and isinstance(kwargs['template'], str):
kwargs['template'] = self.domains[kwargs['template']]
return self.domains.add(cls(self, None, qid=qid, **kwargs))
def get_label(self, label):
'''Get label as identified by index or name
:throws KeyError: when label is not found
'''
# first search for index, verbatim
try:
return self.labels[label]
except KeyError:
pass
# then search for name
for i in self.labels.values():
if i.name == label:
return i
# last call, if label is a number represented as str, search in indices
try:
return self.labels[int(label)]
except (KeyError, ValueError):
pass
raise KeyError(label)
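# Illustrative (assuming the default label set): app.get_label(1),
# app.get_label('red') and app.get_label('1') all return the same Label;
# an unknown label raises KeyError.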
def add_pool(self, name, **kwargs):
""" Add a storage pool to config."""
if name in self.pools.keys():
raise qubes.exc.QubesException('pool named %s already exists \n' %
name)
kwargs['name'] = name
pool = self._get_pool(**kwargs)
pool.setup()
self.pools[name] = pool
return pool
def remove_pool(self, name):
""" Remove a storage pool from config file. """
try:
pool = self.pools[name]
del self.pools[name]
pool.destroy()
except KeyError:
return
def get_pool(self, pool):
''' Returns a :py:class:`qubes.storage.Pool` instance '''
if isinstance(pool, qubes.storage.Pool):
return pool
try:
return self.pools[pool]
except KeyError:
raise qubes.exc.QubesException('Unknown storage pool ' + pool)
@staticmethod
def _get_pool(**kwargs):
try:
name = kwargs['name']
assert name, 'Name needs to be a non-empty string'
except KeyError:
raise qubes.exc.QubesException('No pool name for pool')
try:
driver = kwargs['driver']
except KeyError:
raise qubes.exc.QubesException('No driver specified for pool ' +
name)
try:
klass = qubes.utils.get_entry_point_one(
qubes.storage.STORAGE_ENTRY_POINT, driver)
del kwargs['driver']
return klass(**kwargs)
except KeyError:
raise qubes.exc.QubesException('No driver %s for pool %s' %
(driver, name))
def register_event_handlers(self):
'''Register libvirt event handlers, which will translate libvirt
events into qubes.events. This function should be called only in
'qubesd' process and only when mainloop has been already set.
'''
self._domain_event_callback_id = (
self.vmm.libvirt_conn.domainEventRegisterAny(
None, # any domain
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._domain_event_callback,
None))
def _domain_event_callback(self, _conn, domain, event, _detail, _opaque):
'''Generic libvirt event handler (virConnectDomainEventCallback),
translate libvirt event into qubes.events.
'''
if not self.events_enabled:
return
try:
vm = self.domains[domain.name()]
except KeyError:
# ignore events for unknown domains
return
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
vm.fire_event('domain-shutdown')
@qubes.events.handler('domain-pre-delete')
def on_domain_pre_deleted(self, event, vm):
# pylint: disable=unused-argument
for obj in itertools.chain(self.domains, (self,)):
for prop in obj.property_list():
try:
if isinstance(prop, qubes.vm.VMProperty) and \
getattr(obj, prop.__name__) == vm:
self.log.error(
'Cannot remove %s, used by %s.%s',
vm, obj, prop.__name__)
raise qubes.exc.QubesVMInUseError(vm)
except AttributeError:
pass
@qubes.events.handler('domain-delete')
def on_domain_deleted(self, event, vm):
# pylint: disable=unused-argument
for propname in (
'default_netvm',
'default_fw_netvm',
'clockvm',
'updatevm',
'default_template',
):
try:
if getattr(self, propname) == vm:
delattr(self, propname)
except AttributeError:
pass
@qubes.events.handler('property-pre-set:clockvm')
def on_property_pre_set_clockvm(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument,no-self-use
if newvalue is None:
return
if 'service.clocksync' not in newvalue.features:
newvalue.features['service.clocksync'] = True
@qubes.events.handler('property-set:clockvm')
def on_property_set_clockvm(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument,no-self-use
if oldvalue and oldvalue.features.get('service.clocksync', False):
del oldvalue.features['service.clocksync']
@qubes.events.handler(
'property-pre-set:default_netvm',
'property-pre-set:default_fw_netvm')
def on_property_pre_set_default_netvm(self, event, name, newvalue,
oldvalue=None):
# pylint: disable=unused-argument,invalid-name
if newvalue is not None and oldvalue is not None \
and oldvalue.is_running() and not newvalue.is_running() \
and self.domains.get_vms_connected_to(oldvalue):
raise qubes.exc.QubesVMNotRunningError(newvalue,
'Cannot change {!r} to domain that '
'is not running ({!r}).'.format(name, newvalue.name))
@qubes.events.handler('property-set:default_fw_netvm')
def on_property_set_default_fw_netvm(self, event, name, newvalue,
oldvalue=None):
# pylint: disable=unused-argument,invalid-name
for vm in self.domains:
if hasattr(vm, 'provides_network') and vm.provides_network and \
hasattr(vm, 'netvm') and vm.property_is_default('netvm'):
# fire property-del:netvm as it is responsible for resetting
# netvm to its default value
vm.fire_event('property-pre-del:netvm', pre_event=True,
name='netvm', oldvalue=oldvalue)
vm.fire_event('property-del:netvm',
name='netvm', oldvalue=oldvalue)
@qubes.events.handler('property-set:default_netvm')
def on_property_set_default_netvm(self, event, name, newvalue,
oldvalue=None):
# pylint: disable=unused-argument
for vm in self.domains:
if hasattr(vm, 'provides_network') and not vm.provides_network and \
hasattr(vm, 'netvm') and vm.property_is_default('netvm'):
# fire property-del:netvm as it is responsible for resetting
# netvm to its default value
vm.fire_event('property-pre-del:netvm', pre_event=True,
name='netvm', oldvalue=oldvalue)
vm.fire_event('property-del:netvm',
name='netvm', oldvalue=oldvalue)
| gpl-2.0 |
uberlaggydarwin/bugfree-wookie | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among the parents, create it as a new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
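# Illustrative: a raw function-tracer line such as
#   "bash-1234 [000] 4154.103421: do_sys_open <-sys_open"
# parses to ('4154.103421', 'do_sys_open', 'sys_open'); lines starting with '#'
# raise CommentLineException and anything else raises BrokenLineException.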
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
InspectorB/3Dami-python | src/raw/load.py | 1 | 5632 | from os.path import abspath, exists, isdir, isfile, walk, join, basename, expanduser, getsize
import io
from utils.pubsub import PubSub
import struct
from wire.ttypes import TBinaryProtocol
from wire.ttypes import Message
from thrift.transport import TTransport
class DataLocation:
"""A DataLocation represents where data resides on disk"""
ignore_paths = ['rdiff-backup-data']
def __init__(self, directory):
"""
Arguments:
directory -- A directory path string
"""
self.location = abspath(expanduser(directory))
assert exists(self.location), "The given path does not exist"
assert isdir(self.location), "The given path is not a directory"
@staticmethod
def sort_paths_by_size(paths):
"""
Sort the list of paths by the size of the files. Directories are filtered out.
"""
files = [p for p in paths if isfile(p)]
files_with_size = [(p, getsize(p)) for p in files]
files_sorted = sorted(files_with_size, key=lambda x: x[1], reverse=True)
return [f[0] for f in files_sorted]
def list_files(self, sort_by_size=False):
"""
:param sort_by_size: instead of listing the files by creation date, return them sorted by size.
This is useful for distributing work on the cluster, where we want the biggest files to be
processed first. N.B. This should only be used with the sessions, and not with the raw data, for
there we want to maintain temporal order.
"""
files = []
def visit(fs, directory, names):
for ip in DataLocation.ignore_paths:
if ip in names:
names.remove(ip)
fs.extend([n for n in [join(directory, m) for m in names] if isfile(n) and basename(n)[0] != '.'])
walk(self.location, visit, files)
if sort_by_size:
return self.sort_paths_by_size(files)
else:
return files
def count_files(self):
return len(self.list_files())
class LoaderBase(PubSub):
""""Load data from file and for each object notify subscribers"""
PUBLISH_BOTH = 2
PUBLISH_HEADER = 1
PUBLISH_NONE = 0
def __init__(self, selector=None):
PubSub.__init__(self)
self.selector = selector
def on_finish(self):
pass
def read_single(self, fp):
with io.open(fp, "rb", 2**16) as buf:
transport = TTransport.TFileObjectTransport(buf)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
try:
while True:
hbs = buf.read(48) # Header is 48 bytes long, see scala code
if len(hbs) != 48: break # Stop reading from this file if there aren't enough bytes
header = struct.unpack_from('>qqqqhhiii', hbs)
if self.selector == None \
or self.selector == self.PUBLISH_BOTH \
or (hasattr(self.selector, '__call__') and self.selector(header) == self.PUBLISH_BOTH):
msg = Message()
msg.read(protocol)
self.publish((header, msg))
else:
buf.seek(Header.size(header), io.SEEK_CUR)
if self.selector == self.PUBLISH_HEADER \
or (hasattr(self.selector, '__call__') and self.selector(header) == self.PUBLISH_HEADER):
self.publish((header, None))
except EOFError, e:
print 'Encountered unexpected EOF in file %s' % fp
def read(self):
raise Exception("Implement!")
class Loader(LoaderBase):
""""Load data from files provided by a DataLocation"""
def __init__(self, dl, selector=None):
"""
The loader goes through all files at the given data location and loads a header and
then the corresponding message. The selector determines what is published.
Arguments:
dl -- a DataLocation
selector -- a function that determines what is published
"""
LoaderBase.__init__(self, selector=selector)
self.dl = dl
self.handled_files = 0
def read(self):
for fp in self.dl.list_files():
self.handled_files += 1
self.read_single(fp)
# Notify everyone we've finished
self.finish()
class LoaderSingle(LoaderBase):
"""Load only a single file"""
def __init__(self, fp, selector=None):
"""
The loader goes through all files at the given data location and loads a header and
then the corresponding message. The selector determines what is published.
Arguments:
fp -- a file path (string)
selector -- a function that determines what is published
"""
LoaderBase.__init__(self, selector=selector)
self.fp = fp
def read(self):
self.read_single(self.fp)
# Notify everyone we've finished
self.finish()
class Header:
"""
The Header class is useful for dealing with header tuples.
"""
def __init__(self):
raise Exception("Bad boy!")
@staticmethod
def timestamp_server(t): return t[0]
@staticmethod
def timestamp_client(t): return t[1]
@staticmethod
def user(t): return t[2]
@staticmethod
def size(t): return t[3]
@staticmethod
def type_metadata(t): return t[4]
@staticmethod
def type_data(t): return t[5]
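# Illustrative: the accessors above index the tuple produced by
# struct.unpack_from('>qqqqhhiii', ...) in read_single, e.g. Header.size(t) is the
# message payload length in bytes that gets skipped when only headers are published.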
| gpl-3.0 |
llvm/llvm-zorg | zorg/buildbot/commands/NinjaCommand.py | 1 | 3878 | # TODO: Use Interpolate instead of WithProperties.
import re
from buildbot.process.properties import WithProperties
from buildbot.steps.shell import WarningCountingShellCommand
class NinjaCommand(WarningCountingShellCommand):
DEFAULT_NINJA = 'ninja'
@staticmethod
def sanitize_kwargs(kwargs):
# kwargs we could get and must not pass through
# to the buildstep.RemoteShellCommand constructor.
# Note: This is a workaround of the buildbot design issue,
# thus should be removed once the original issue gets fixed.
consume_kwargs = [
"jobs",
"loadaverage",
]
sanitized_kwargs = kwargs.copy()
for k in consume_kwargs:
if k in sanitized_kwargs.keys():
del sanitized_kwargs[k]
return sanitized_kwargs
name = "build"
haltOnFailure = True
description = ["building"]
descriptionDone = ["build"]
renderables = (
'options',
'targets',
'ninja',
)
def __init__(self, options=None, targets=None, ninja=DEFAULT_NINJA, logObserver=None, **kwargs):
self.ninja = ninja
self.targets = targets
if options is None:
self.options = list()
else:
self.options = list(options)
if logObserver:
self.logObserver = logObserver
self.addLogObserver('stdio', self.logObserver)
j_opt = re.compile(r'^-j$|^-j\d+$')
l_opt = re.compile(r'^-l$|^-l\d+(\.(\d+)?)?$')
command = list()
command += [self.ninja]
# We can get jobs in the options. If so, we would use that.
if not any(j_opt.search(opt) for opt in self.options if isinstance(opt, str)):
# Otherwise let's see if we got it in the kwargs.
if kwargs.get('jobs', None):
self.options += ["-j", kwargs['jobs']]
else:
# Use the property if option was not explicitly
# specified.
command += [
WithProperties("%(jobs:+-j)s"),
WithProperties("%(jobs:-)s"),
]
# The same logic applies to handling the loadaverage option.
if not any(l_opt.search(opt) for opt in self.options if isinstance(opt, str)):
if kwargs.get('loadaverage', None):
self.options += ["-l", kwargs['loadaverage']]
else:
command += [
WithProperties("%(loadaverage:+-l)s"),
WithProperties("%(loadaverage:-)s"),
]
if self.options:
command += self.options
if self.targets:
command += self.targets
# Remove here all the kwargs any of our LLVM buildbot command could consume.
# Note: We will remove all the empty items from the command at start, as we
# still didn't get yet WithProperties rendered.
sanitized_kwargs = self.sanitize_kwargs(kwargs)
sanitized_kwargs["command"] = command
# And upcall to let the base class do its work
super().__init__(**sanitized_kwargs)
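# Illustrative (an assumption, not from the original file): NinjaCommand(
# targets=['check-all'], jobs=8) ends up running roughly "ninja -j 8 check-all"
# once the unrendered -j/-l property placeholders are dropped in
# buildCommandKwargs.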
def setupEnvironment(self, cmd):
# First upcall to get everything prepared.
super().setupEnvironment(cmd)
# Set default status format string.
if cmd.args['env'] is None:
cmd.args['env'] = {}
cmd.args['env']['NINJA_STATUS'] = cmd.args['env'].get('NINJA_STATUS', "%e [%u/%r/%f] ")
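# With the default "%e [%u/%r/%f] " Ninja prints elapsed seconds followed by
# [remaining/running/finished] edge counts (see the NINJA_STATUS documentation).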
def buildCommandKwargs(self, warnings):
kwargs = super().buildCommandKwargs(warnings)
# Remove all the empty items from the command list,
# which we could get if Interpolate rendered to empty strings.
kwargs['command'] = [cmd for cmd in kwargs['command'] if cmd]
return kwargs
| apache-2.0 |
markstoehr/structured_gaussian_mixtures | structured_gaussian_mixtures/online_GMM_no_mean.py | 1 | 3515 | from __future__ import division, print_function
from bm_tools import OnlineLogsumexp, sigmoid, log1pexp, logsumexp
import numpy
TRAIN = "/home/mark/Projects/succotash/succotash/datasets/train_examples.npy"
TEST = "/home/mark/Projects/succotash/succotash/datasets/test_examples.npy"
X = numpy.load(TRAIN)
n_components = 25
n_features = X.shape[1]
model_means = numpy.zeros((n_components,n_features))
model_weights = numpy.ones(n_components)/n_components
model_covars = numpy.zeros((n_components,n_features))
first_moment_stats = numpy.zeros(model_means.shape)
second_moment_stats = numpy.zeros(model_means.shape)
weights = numpy.zeros(model_weights.shape)
def score_samples(X,model_means,model_weights,model_covars,use_scipy_misc=False):
inv_covars = 1.0/model_covars
n_features = X.shape[1]
lpr= - 0.5 * ( n_features * numpy.log(2*numpy.pi) +
numpy.sum(numpy.log(model_covars),1)
+ numpy.sum( (model_means**2)*inv_covars,1)
- 2 * numpy.dot(X, (model_means*inv_covars).T)
+ numpy.dot(X**2, inv_covars.T))
if numpy.any(numpy.isnan(lpr)):
import pdb; pdb.set_trace()
lpr += numpy.log(model_weights)
logprob = logsumexp(lpr)
responsibilities = numpy.exp(lpr - logprob[:,numpy.newaxis])
return logprob, responsibilities
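# score_samples evaluates, per component k, the diagonal-Gaussian log density
# -0.5 * sum_d [log(2*pi*var_kd) + (x_d - mu_kd)^2 / var_kd] + log(w_k),
# with the quadratic term expanded as mu^2/var - 2*x*mu/var + x^2/var so the
# whole batch can be computed with two dot products.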
for i in xrange(n_components):
# model_means[i] = numpy.mean(X[i::n_components],0)
model_covars[i] = numpy.mean((X[i::n_components] - model_means[i])**2,0)
minibatch_size = 300
alpha = 0.05
n_batches = X.shape[0]/minibatch_size
current_log_likelihood=None
for i in xrange(2000):
prev_log_likelihood = current_log_likelihood
batch_idx = i % n_batches
if batch_idx == n_batches - 1:
batch_end = X.shape[0]
else:
batch_end = (batch_idx+1)*minibatch_size
cur_minibatch_size = batch_end - batch_idx*minibatch_size
X_batch = X[batch_idx*minibatch_size:batch_end]
lls, responsibilities = score_samples(
X_batch,
model_means,model_weights,model_covars)
current_log_likelihood = lls.mean()
if prev_log_likelihood is not None:
change = abs((current_log_likelihood - prev_log_likelihood)/prev_log_likelihood)
if change < .00001:
pass #break
weights_tmp = responsibilities.sum(0)
if i == 0:
weights[:] = weights_tmp
else:
weights += alpha * ( weights_tmp - weights)
inverse_weights = 1.0/(weights_tmp[:,numpy.newaxis] + 1e-5)
model_weights = weights/(weights.sum() + 1e-5) + 1e-6
model_weights /= model_weights.sum()
# model_means[:] = first_moment_stats
weighted_X_sq_sum = numpy.dot(responsibilities.T,X_batch**2) * inverse_weights
if i == 0:
second_moment_stats[:] = weighted_X_sq_sum
else:
second_moment_stats[:] += alpha * (weighted_X_sq_sum - second_moment_stats)
model_covars = second_moment_stats - first_moment_stats**2
print(current_log_likelihood,i)
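# Each pass above is one online-EM step: the E-step computes responsibilities for
# the minibatch, and the M-step blends the component weights and second moments
# into the running statistics with step size alpha. Since the means stay fixed at
# zero, the covariance update reduces to the responsibility-weighted second moment.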
X_test = numpy.load(TEST)
lls, responsibilities = score_samples(X_test,model_means,model_weights,model_covars)
print(lls.mean())
# n_c = 25; lls = -2054.1306788521774
# n_c = 50; lls = -2041.20835419
# n_c = 100; lls = -2040.87370778
# n_c = 200; lls = -2040.81411834
# n_c = 300; lls = -2041.50793475
from sklearn.mixture import gmm
smodel = gmm.GMM(n_components=25, covariance_type='diag', n_iter=2000, init_params='wc', params='wc')
smodel.means_ = numpy.zeros((n_components, n_features))
smodel.fit(X)
smodel.score(X_test).mean()
| apache-2.0 |
zzeleznick/zDjango | venv/lib/python2.7/site-packages/psycopg2/tests/test_extras_dictcursor.py | 62 | 17404 | #!/usr/bin/env python
#
# extras_dictcursor - test if DictCursor extension class works
#
# Copyright (C) 2004-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
from datetime import timedelta
import psycopg2
import psycopg2.extras
from testutils import unittest, ConnectingTestCase, skip_before_postgres
from testutils import skip_if_no_namedtuple
class ExtrasDictCursorTests(ConnectingTestCase):
"""Test if DictCursor extension class works."""
def setUp(self):
ConnectingTestCase.setUp(self)
curs = self.conn.cursor()
curs.execute("CREATE TEMPORARY TABLE ExtrasDictCursorTests (foo text)")
curs.execute("INSERT INTO ExtrasDictCursorTests VALUES ('bar')")
self.conn.commit()
def testDictConnCursorArgs(self):
self.conn.close()
self.conn = self.connect(connection_factory=psycopg2.extras.DictConnection)
cur = self.conn.cursor()
self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
self.assertEqual(cur.name, None)
# overridable
cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.NamedTupleCursor)
self.assertEqual(cur.name, 'foo')
self.assert_(isinstance(cur, psycopg2.extras.NamedTupleCursor))
def testDictCursorWithPlainCursorFetchOne(self):
self._testWithPlainCursor(lambda curs: curs.fetchone())
def testDictCursorWithPlainCursorFetchMany(self):
self._testWithPlainCursor(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithPlainCursorFetchManyNoarg(self):
self._testWithPlainCursor(lambda curs: curs.fetchmany()[0])
def testDictCursorWithPlainCursorFetchAll(self):
self._testWithPlainCursor(lambda curs: curs.fetchall()[0])
def testDictCursorWithPlainCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithPlainCursor(getter)
def testUpdateRow(self):
row = self._testWithPlainCursor(lambda curs: curs.fetchone())
row['foo'] = 'qux'
self.failUnless(row['foo'] == 'qux')
self.failUnless(row[0] == 'qux')
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursor(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
self.failUnless(row[0] == 'bar')
return row
def testDictCursorWithPlainCursorRealFetchOne(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchone())
def testDictCursorWithPlainCursorRealFetchMany(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithPlainCursorRealFetchManyNoarg(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorWithPlainCursorRealFetchAll(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorWithPlainCursorRealIter(self):
def getter(curs):
for row in curs:
return row
self._testWithPlainCursorReal(getter)
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorRealIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursorReal(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
def testDictCursorWithNamedCursorFetchOne(self):
self._testWithNamedCursor(lambda curs: curs.fetchone())
def testDictCursorWithNamedCursorFetchMany(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany()[0])
def testDictCursorWithNamedCursorFetchAll(self):
self._testWithNamedCursor(lambda curs: curs.fetchall()[0])
def testDictCursorWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursor(getter)
@skip_before_postgres(8, 2)
def testDictCursorWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursor(self, getter):
curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
self.failUnless(row[0] == 'bar')
def testDictCursorRealWithNamedCursorFetchOne(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchone())
def testDictCursorRealWithNamedCursorFetchMany(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorRealWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorRealWithNamedCursorFetchAll(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorRealWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursorReal(getter)
@skip_before_postgres(8, 2)
def testDictCursorRealWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorRealWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursorReal(self, getter):
curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
def _testNamedCursorNotGreedy(self, curs):
curs.itersize = 2
curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
recs = []
for t in curs:
time.sleep(0.01)
recs.append(t)
# check that the dataset was not fetched in a single gulp
self.assert_(recs[1]['ts'] - recs[0]['ts'] < timedelta(seconds=0.005))
self.assert_(recs[2]['ts'] - recs[1]['ts'] > timedelta(seconds=0.0099))
def _testIterRowNumber(self, curs):
# Only checking for dataset < itersize:
# see CursorTests.test_iter_named_cursor_rownumber
curs.itersize = 20
curs.execute("""select * from generate_series(1,10)""")
for i, r in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
def testPickleDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r[0], r1[0])
self.assertEqual(r[1], r1[1])
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._index, r1._index)
def testPickleRealDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._column_mapping, r1._column_mapping)
class NamedTupleCursorTest(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
from psycopg2.extras import NamedTupleConnection
try:
from collections import namedtuple
except ImportError:
return
self.conn = self.connect(connection_factory=NamedTupleConnection)
curs = self.conn.cursor()
curs.execute("CREATE TEMPORARY TABLE nttest (i int, s text)")
curs.execute("INSERT INTO nttest VALUES (1, 'foo')")
curs.execute("INSERT INTO nttest VALUES (2, 'bar')")
curs.execute("INSERT INTO nttest VALUES (3, 'baz')")
self.conn.commit()
@skip_if_no_namedtuple
def test_cursor_args(self):
cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor)
self.assertEqual(cur.name, 'foo')
self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
@skip_if_no_namedtuple
def test_fetchone(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
t = curs.fetchone()
self.assertEqual(t[0], 1)
self.assertEqual(t.i, 1)
self.assertEqual(t[1], 'foo')
self.assertEqual(t.s, 'foo')
self.assertEqual(curs.rownumber, 1)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchmany_noarg(self):
curs = self.conn.cursor()
curs.arraysize = 2
curs.execute("select * from nttest order by 1")
res = curs.fetchmany()
self.assertEqual(2, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchmany(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
res = curs.fetchmany(2)
self.assertEqual(2, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchall(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
res = curs.fetchall()
self.assertEqual(3, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(res[2].i, 3)
self.assertEqual(res[2].s, 'baz')
self.assertEqual(curs.rownumber, 3)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_executemany(self):
curs = self.conn.cursor()
curs.executemany("delete from nttest where i = %s",
[(1,), (2,)])
curs.execute("select * from nttest order by 1")
res = curs.fetchall()
self.assertEqual(1, len(res))
self.assertEqual(res[0].i, 3)
self.assertEqual(res[0].s, 'baz')
@skip_if_no_namedtuple
def test_iter(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
i = iter(curs)
self.assertEqual(curs.rownumber, 0)
t = i.next()
self.assertEqual(t.i, 1)
self.assertEqual(t.s, 'foo')
self.assertEqual(curs.rownumber, 1)
self.assertEqual(curs.rowcount, 3)
t = i.next()
self.assertEqual(t.i, 2)
self.assertEqual(t.s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
t = i.next()
self.assertEqual(t.i, 3)
self.assertEqual(t.s, 'baz')
self.assertRaises(StopIteration, i.next)
self.assertEqual(curs.rownumber, 3)
self.assertEqual(curs.rowcount, 3)
def test_error_message(self):
try:
from collections import namedtuple
except ImportError:
# an import error somewhere
from psycopg2.extras import NamedTupleConnection
try:
self.conn = self.connect(
connection_factory=NamedTupleConnection)
curs = self.conn.cursor()
curs.execute("select 1")
curs.fetchone()
except ImportError:
pass
else:
self.fail("expecting ImportError")
else:
return self.skipTest("namedtuple available")
@skip_if_no_namedtuple
def test_record_updated(self):
curs = self.conn.cursor()
curs.execute("select 1 as foo;")
r = curs.fetchone()
self.assertEqual(r.foo, 1)
curs.execute("select 2 as bar;")
r = curs.fetchone()
self.assertEqual(r.bar, 2)
self.assertRaises(AttributeError, getattr, r, 'foo')
@skip_if_no_namedtuple
def test_no_result_no_surprise(self):
curs = self.conn.cursor()
curs.execute("update nttest set s = s")
self.assertRaises(psycopg2.ProgrammingError, curs.fetchone)
curs.execute("update nttest set s = s")
self.assertRaises(psycopg2.ProgrammingError, curs.fetchall)
@skip_if_no_namedtuple
def test_minimal_generation(self):
# Instrument the class to verify it gets called the minimum number of times.
from psycopg2.extras import NamedTupleCursor
f_orig = NamedTupleCursor._make_nt
calls = [0]
def f_patched(self_):
calls[0] += 1
return f_orig(self_)
NamedTupleCursor._make_nt = f_patched
try:
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchone()
curs.fetchone()
self.assertEqual(1, calls[0])
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchall()
self.assertEqual(2, calls[0])
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchmany(1)
self.assertEqual(3, calls[0])
finally:
NamedTupleCursor._make_nt = f_orig
@skip_if_no_namedtuple
@skip_before_postgres(8, 0)
def test_named(self):
curs = self.conn.cursor('tmp')
curs.execute("""select i from generate_series(0,9) i""")
recs = []
recs.extend(curs.fetchmany(5))
recs.append(curs.fetchone())
recs.extend(curs.fetchall())
self.assertEqual(range(10), [t.i for t in recs])
@skip_if_no_namedtuple
def test_named_fetchone(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
t = curs.fetchone()
self.assertEqual(t.i, 42)
@skip_if_no_namedtuple
def test_named_fetchmany(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
recs = curs.fetchmany(10)
self.assertEqual(recs[0].i, 42)
@skip_if_no_namedtuple
def test_named_fetchall(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
recs = curs.fetchall()
self.assertEqual(recs[0].i, 42)
@skip_if_no_namedtuple
@skip_before_postgres(8, 2)
def test_not_greedy(self):
curs = self.conn.cursor('tmp')
curs.itersize = 2
curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
recs = []
for t in curs:
time.sleep(0.01)
recs.append(t)
# check that the dataset was not fetched in a single gulp
self.assert_(recs[1].ts - recs[0].ts < timedelta(seconds=0.005))
self.assert_(recs[2].ts - recs[1].ts > timedelta(seconds=0.0099))
@skip_if_no_namedtuple
@skip_before_postgres(8, 0)
def test_named_rownumber(self):
curs = self.conn.cursor('tmp')
# Only checking for dataset < itersize:
# see CursorTests.test_iter_named_cursor_rownumber
curs.itersize = 4
curs.execute("""select * from generate_series(1,3)""")
for i, t in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
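# Hedged usage sketch (added for illustration, not part of the test suite):
# how the cursor factories exercised above are used in application code.
# The DSN below is a placeholder assumption; point it at a reachable
# PostgreSQL instance before calling the function.
def _dict_cursor_demo(dsn="dbname=test user=postgres"):
    conn = psycopg2.connect(dsn)
    try:
        curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        curs.execute("select 10 as a, 'foo' as b")
        row = curs.fetchone()
        # DictCursor rows support both positional and key access
        print("%s %s" % (row[0], row['b']))
        ntcurs = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
        ntcurs.execute("select 10 as a, 'foo' as b")
        rec = ntcurs.fetchone()
        # NamedTupleCursor rows expose columns as attributes
        print("%s %s" % (rec.a, rec.b))
    finally:
        conn.close()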
| mit |
npinto/bangmetric | bangmetric/_sklearn_ridge.py | 1 | 28105 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from sklearn.linear_model.base import LinearModel
from sklearn.base import RegressorMixin
from sklearn.base import ClassifierMixin
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import safe_asarray
from sklearn.preprocessing import LabelBinarizer
from sklearn.grid_search import GridSearchCV
def _solve(A, b, solver, tol):
# helper method for ridge_regression, A is symmetric positive
if solver == 'auto':
if hasattr(A, 'todense'):
solver = 'sparse_cg'
else:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
if b.ndim < 2:
from scipy.sparse import linalg as sp_linalg
sol, error = sp_linalg.cg(A, b, tol=tol)
if error:
raise ValueError("Failed with error code %d" % error)
return sol
else:
# sparse_cg cannot handle a 2-d b.
sol = []
for j in range(b.shape[1]):
sol.append(_solve(A, b[:, j], solver="sparse_cg", tol=tol))
return np.array(sol).T
elif solver == 'dense_cholesky':
from scipy import linalg
if hasattr(A, 'todense'):
A = A.todense()
return linalg.solve(A, b, sym_pos=True, overwrite_a=True)
else:
raise NotImplementedError('Solver %s not implemented' % solver)
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto', tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}, optional
Solver to use in the computational routines. 'dense_cholesky'
will use the standard scipy.linalg.solve function, 'sparse_cg'
will use the conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
is_sparse = False
if hasattr(X, 'todense'): # lazy import of scipy.sparse
from scipy import sparse
is_sparse = sparse.issparse(X)
if is_sparse:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
I = sparse.lil_matrix((n_samples, n_samples))
I.setdiag(np.ones(n_samples) * alpha * sample_weight)
c = _solve(X * X.T + I, y, solver, tol)
coef = X.T * c
else:
I = sparse.lil_matrix((n_features, n_features))
I.setdiag(np.ones(n_features) * alpha)
coef = _solve(X.T * X + I, X.T * y, solver, tol)
else:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
A = np.dot(X, X.T)
A.flat[::n_samples + 1] += alpha * sample_weight
coef = np.dot(X.T, _solve(A, y, solver, tol))
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
A = np.dot(X.T, X)
A.flat[::n_features + 1] += alpha
coef = _solve(A, np.dot(X.T, y), solver, tol)
return coef.T
class _BaseRidge(LinearModel):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, tol=1e-3):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.tol = tol
def fit(self, X, y, sample_weight=1.0, solver='auto'):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
routines. 'dense_cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
            scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = \
self._center_data(X, y, self.fit_intercept,
self.normalize, self.copy_X)
self.coef_ = ridge_regression(X, y, self.alpha, sample_weight,
solver, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_responses]).
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, normalize=False,
tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, tol=1e-3):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X, tol=tol)
class RidgeClassifier(_BaseRidge, ClassifierMixin):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, tol=1e-3, class_weight=None):
super(RidgeClassifier, self).__init__(alpha=alpha,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, tol=tol)
self.class_weight = class_weight
def fit(self, X, y, solver='auto'):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
routines. 'dense_cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
            scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight is None:
class_weight = {}
else:
class_weight = self.class_weight
sample_weight_classes = np.array([class_weight.get(k, 1.0) for k in y])
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
_BaseRidge.fit(self, X, Y, solver=solver,
sample_weight=sample_weight_classes)
return self
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True,
normalize=False, score_func=None, loss_func=None,
copy_X=True, gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
from scipy import linalg
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
from scipy import sparse
if sparse.issparse(X) and hasattr(X, 'toarray'):
X = X.toarray()
U, s, _ = np.linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(sample_weight * alpha, y, v, Q, QT_y)
else:
out, c = _values(sample_weight * alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
func = self.score_func if self.score_func else self.loss_func
out = [func(y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))]
best = np.argmax(out) if self.score_func else np.argmin(out)
self.best_alpha = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
fit_intercept=True, normalize=False, score_func=None,
loss_func=None, cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
            estimator = _RidgeGCV(self.alphas, self.fit_intercept,
                                  self.normalize, self.score_func,
                                  self.loss_func,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.best_alpha = estimator.best_alpha
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.best_alpha = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features, otherwise use eigen
'svd' : force computation via singular value decomposition of X
        'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper \
option of the two depending upon the shape of the training data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
`coef_` : array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas],
optional
Cross-validation values for each alpha (if
`store_cv_values=True` and `cv=None`). After `fit()` has been
called, this attribute will contain the mean squared errors (by
default) or the values of the `{loss,score}_func` function (if
provided in the constructor).
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
"""
pass
class RidgeClassifierCV(_BaseRidgeCV, ClassifierMixin):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None,
class_weight=None):
super(RidgeClassifierCV, self).__init__(alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
score_func=score_func, loss_func=loss_func, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : float or numpy array of shape [n_samples]
Sample weight
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
Returns
-------
self : object
Returns self.
"""
        if class_weight is not None:
warnings.warn("'class_weight' is now an initialization parameter."
"Using it in the 'fit' method is deprecated.",
DeprecationWarning)
self.class_weight_ = class_weight
else:
self.class_weight_ = self.class_weight
if self.class_weight_ is None:
self.class_weight_ = {}
sample_weight2 = np.array([self.class_weight_.get(k, 1.0) for k in y])
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
_BaseRidgeCV.fit(self, X, Y,
sample_weight=sample_weight * sample_weight2)
return self
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
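# Hedged demo of the estimators defined above (added illustration, not part
# of the original module): the data shapes and alpha grid are arbitrary
# assumptions, not recommended settings.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)
    w_true = rng.randn(10)
    y_demo = np.dot(X_demo, w_true) + 0.1 * rng.randn(50)

    reg = Ridge(alpha=1.0).fit(X_demo, y_demo)
    print("Ridge fitted, coef_ shape: %s" % (reg.coef_.shape,))

    cv = RidgeCV(alphas=np.array([0.01, 0.1, 1.0, 10.0])).fit(X_demo, y_demo)
    # RidgeCV in its default GCV mode records the selected regularization
    print("RidgeCV selected alpha: %s" % cv.best_alpha)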
| bsd-3-clause |
moijes12/oh-mainline | vendor/packages/kombu/kombu/pidbox.py | 23 | 12704 | """
kombu.pidbox
===============
Generic process mailbox.
"""
from __future__ import absolute_import
import socket
import warnings
from collections import defaultdict, deque
from copy import copy
from itertools import count
from threading import local
from time import time
from . import Exchange, Queue, Consumer, Producer
from .clocks import LamportClock
from .common import maybe_declare, oid_from
from .exceptions import InconsistencyError
from .five import range
from .log import get_logger
from .utils import cached_property, kwdict, uuid, reprcall
REPLY_QUEUE_EXPIRES = 10
W_PIDBOX_IN_USE = """\
A node named {node.hostname} is already using this process mailbox!
Maybe you forgot to shutdown the other node or did not do so properly?
Or if you meant to start multiple nodes on the same host please make sure
you give each node a unique node name!
"""
__all__ = ['Node', 'Mailbox']
logger = get_logger(__name__)
debug, error = logger.debug, logger.error
class Node(object):
#: hostname of the node.
hostname = None
#: the :class:`Mailbox` this is a node for.
mailbox = None
#: map of method name/handlers.
handlers = None
#: current context (passed on to handlers)
state = None
#: current channel.
channel = None
def __init__(self, hostname, state=None, channel=None,
handlers=None, mailbox=None):
self.channel = channel
self.mailbox = mailbox
self.hostname = hostname
self.state = state
self.adjust_clock = self.mailbox.clock.adjust
if handlers is None:
handlers = {}
self.handlers = handlers
def Consumer(self, channel=None, no_ack=True, accept=None, **options):
queue = self.mailbox.get_queue(self.hostname)
def verify_exclusive(name, messages, consumers):
if consumers:
warnings.warn(W_PIDBOX_IN_USE.format(node=self))
queue.on_declared = verify_exclusive
return Consumer(
channel or self.channel, [queue], no_ack=no_ack,
accept=self.mailbox.accept if accept is None else accept,
**options
)
def handler(self, fun):
self.handlers[fun.__name__] = fun
return fun
def on_decode_error(self, message, exc):
error('Cannot decode message: %r', exc, exc_info=1)
def listen(self, channel=None, callback=None):
consumer = self.Consumer(channel=channel,
callbacks=[callback or self.handle_message],
on_decode_error=self.on_decode_error)
consumer.consume()
return consumer
def dispatch(self, method, arguments=None,
reply_to=None, ticket=None, **kwargs):
arguments = arguments or {}
debug('pidbox received method %s [reply_to:%s ticket:%s]',
reprcall(method, (), kwargs=arguments), reply_to, ticket)
handle = reply_to and self.handle_call or self.handle_cast
try:
reply = handle(method, kwdict(arguments))
except SystemExit:
raise
except Exception as exc:
error('pidbox command error: %r', exc, exc_info=1)
reply = {'error': repr(exc)}
if reply_to:
self.reply({self.hostname: reply},
exchange=reply_to['exchange'],
routing_key=reply_to['routing_key'],
ticket=ticket)
return reply
def handle(self, method, arguments={}):
return self.handlers[method](self.state, **arguments)
def handle_call(self, method, arguments):
return self.handle(method, arguments)
def handle_cast(self, method, arguments):
return self.handle(method, arguments)
def handle_message(self, body, message=None):
destination = body.get('destination')
if message:
self.adjust_clock(message.headers.get('clock') or 0)
if not destination or self.hostname in destination:
return self.dispatch(**kwdict(body))
dispatch_from_message = handle_message
def reply(self, data, exchange, routing_key, ticket, **kwargs):
self.mailbox._publish_reply(data, exchange, routing_key, ticket,
channel=self.channel,
serializer=self.mailbox.serializer)
class Mailbox(object):
node_cls = Node
exchange_fmt = '%s.pidbox'
reply_exchange_fmt = 'reply.%s.pidbox'
#: Name of application.
namespace = None
#: Connection (if bound).
connection = None
#: Exchange type (usually direct, or fanout for broadcast).
type = 'direct'
#: mailbox exchange (init by constructor).
exchange = None
#: exchange to send replies to.
reply_exchange = None
#: Only accepts json messages by default.
accept = ['json']
#: Message serializer
serializer = None
def __init__(self, namespace,
type='direct', connection=None, clock=None,
accept=None, serializer=None):
self.namespace = namespace
self.connection = connection
self.type = type
self.clock = LamportClock() if clock is None else clock
self.exchange = self._get_exchange(self.namespace, self.type)
self.reply_exchange = self._get_reply_exchange(self.namespace)
self._tls = local()
self.unclaimed = defaultdict(deque)
self.accept = self.accept if accept is None else accept
self.serializer = self.serializer if serializer is None else serializer
def __call__(self, connection):
bound = copy(self)
bound.connection = connection
return bound
def Node(self, hostname=None, state=None, channel=None, handlers=None):
hostname = hostname or socket.gethostname()
return self.node_cls(hostname, state, channel, handlers, mailbox=self)
def call(self, destination, command, kwargs={},
timeout=None, callback=None, channel=None):
return self._broadcast(command, kwargs, destination,
reply=True, timeout=timeout,
callback=callback,
channel=channel)
def cast(self, destination, command, kwargs={}):
return self._broadcast(command, kwargs, destination, reply=False)
def abcast(self, command, kwargs={}):
return self._broadcast(command, kwargs, reply=False)
def multi_call(self, command, kwargs={}, timeout=1,
limit=None, callback=None, channel=None):
return self._broadcast(command, kwargs, reply=True,
timeout=timeout, limit=limit,
callback=callback,
channel=channel)
def get_reply_queue(self):
oid = self.oid
return Queue(
'%s.%s' % (oid, self.reply_exchange.name),
exchange=self.reply_exchange,
routing_key=oid,
durable=False,
auto_delete=True,
queue_arguments={'x-expires': int(REPLY_QUEUE_EXPIRES * 1000)},
)
@cached_property
def reply_queue(self):
return self.get_reply_queue()
def get_queue(self, hostname):
return Queue('%s.%s.pidbox' % (hostname, self.namespace),
exchange=self.exchange,
durable=False,
auto_delete=True)
def _publish_reply(self, reply, exchange, routing_key, ticket,
channel=None, **opts):
chan = channel or self.connection.default_channel
exchange = Exchange(exchange, exchange_type='direct',
delivery_mode='transient',
durable=False)
producer = Producer(chan, auto_declare=False)
try:
producer.publish(
reply, exchange=exchange, routing_key=routing_key,
declare=[exchange], headers={
'ticket': ticket, 'clock': self.clock.forward(),
},
**opts
)
except InconsistencyError:
pass # queue probably deleted and no one is expecting a reply.
def _publish(self, type, arguments, destination=None,
reply_ticket=None, channel=None, timeout=None,
serializer=None):
message = {'method': type,
'arguments': arguments,
'destination': destination}
chan = channel or self.connection.default_channel
exchange = self.exchange
if reply_ticket:
maybe_declare(self.reply_queue(channel))
message.update(ticket=reply_ticket,
reply_to={'exchange': self.reply_exchange.name,
'routing_key': self.oid})
serializer = serializer or self.serializer
producer = Producer(chan, auto_declare=False)
producer.publish(
message, exchange=exchange.name, declare=[exchange],
headers={'clock': self.clock.forward(),
'expires': time() + timeout if timeout else 0},
serializer=serializer,
)
def _broadcast(self, command, arguments=None, destination=None,
reply=False, timeout=1, limit=None,
callback=None, channel=None, serializer=None):
if destination is not None and \
not isinstance(destination, (list, tuple)):
raise ValueError(
'destination must be a list/tuple not {0}'.format(
type(destination)))
arguments = arguments or {}
reply_ticket = reply and uuid() or None
chan = channel or self.connection.default_channel
# Set reply limit to number of destinations (if specified)
if limit is None and destination:
limit = destination and len(destination) or None
serializer = serializer or self.serializer
self._publish(command, arguments, destination=destination,
reply_ticket=reply_ticket,
channel=chan,
timeout=timeout,
serializer=serializer)
if reply_ticket:
return self._collect(reply_ticket, limit=limit,
timeout=timeout,
callback=callback,
channel=chan)
def _collect(self, ticket,
limit=None, timeout=1, callback=None,
channel=None, accept=None):
if accept is None:
accept = self.accept
chan = channel or self.connection.default_channel
queue = self.reply_queue
consumer = Consumer(channel, [queue], accept=accept, no_ack=True)
responses = []
unclaimed = self.unclaimed
adjust_clock = self.clock.adjust
try:
return unclaimed.pop(ticket)
except KeyError:
pass
def on_message(body, message):
# ticket header added in kombu 2.5
header = message.headers.get
adjust_clock(header('clock') or 0)
expires = header('expires')
if expires and time() > expires:
return
this_id = header('ticket', ticket)
if this_id == ticket:
if callback:
callback(body)
responses.append(body)
else:
unclaimed[this_id].append(body)
consumer.register_callback(on_message)
try:
with consumer:
for i in limit and range(limit) or count():
try:
self.connection.drain_events(timeout=timeout)
except socket.timeout:
break
return responses
finally:
chan.after_reply_message_received(queue.name)
def _get_exchange(self, namespace, type):
return Exchange(self.exchange_fmt % namespace,
type=type,
durable=False,
delivery_mode='transient')
def _get_reply_exchange(self, namespace):
return Exchange(self.reply_exchange_fmt % namespace,
type='direct',
durable=False,
delivery_mode='transient')
@cached_property
def oid(self):
try:
return self._tls.OID
except AttributeError:
oid = self._tls.OID = oid_from(self)
return oid
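# Hedged usage sketch (added illustration, not part of kombu): wire a Node
# with a handler and broadcast a command through the Mailbox.  The broker
# URL is a placeholder assumption, and nothing here runs on import.
if __name__ == '__main__':  # pragma: no cover
    from kombu import Connection

    mailbox = Mailbox('example')
    with Connection('amqp://guest:guest@localhost//') as conn:
        bound = mailbox(conn)
        node = bound.Node('worker1', state={},
                          channel=conn.default_channel,
                          handlers={'ping': lambda state: 'pong'})
        node.listen()                    # start consuming on the node queue
        bound.cast(['worker1'], 'ping')  # fire-and-forget command
        conn.drain_events(timeout=1)     # deliver the message to the handler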
| agpl-3.0 |
cervinko/calibre-web | vendor/jinja2/__init__.py | 14 | 2268 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
    Here is a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.7'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction'
]
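# Hedged example (added illustration, not part of the package): render a
# template through the public API exported above.  Guarded so importing
# jinja2 stays side-effect free.
if __name__ == '__main__':
    env = Environment(loader=DictLoader({
        'hello.html': 'Hello {{ user }}! You have {{ items|length }} items.',
    }))
    template = env.get_template('hello.html')
    print(template.render(user='World', items=[1, 2, 3]))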
| gpl-3.0 |
nrhine1/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest Wikipedia-related components of the
# principal singular vector, which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
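# Hedged sanity check (added illustration, not in the original example): run
# the same power-iteration routine on a tiny hand-built graph.  The 3-node
# adjacency matrix below is an arbitrary assumption used only to show the
# expected input format (a CSR matrix) and output (one score per vertex).
tiny = sparse.csr_matrix(np.array([[0, 1, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]], dtype=np.float32))
tiny_scores = centrality_scores(tiny, max_iter=100, tol=1e-10)
print("tiny graph centrality scores:", tiny_scores)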
| bsd-3-clause |
v0i0/lammps | python/examples/simple.py | 9 | 2271 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# simple.py
# Purpose: mimic operation of examples/COUPLE/simple/simple.cpp via Python
# Serial syntax: simple.py in.lammps
# in.lammps = LAMMPS input script
# Parallel syntax: mpirun -np 4 simple.py in.lammps
# in.lammps = LAMMPS input script
# also need to uncomment either Pypar or mpi4py sections below
from __future__ import print_function
import sys
import numpy as np
import ctypes
# parse command line
argv = sys.argv
if len(argv) != 2:
print("Syntax: simple.py in.lammps")
sys.exit()
infile = sys.argv[1]
me = 0
# uncomment this if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()
from lammps import lammps
lmp = lammps()
# run infile one line at a time
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
# run 10 more steps
# get coords from LAMMPS
# change coords of 1st atom
# put coords back into LAMMPS
# run a single step with changed coords
lmp.command("run 10")
x = lmp.gather_atoms("x",1,3)
v = lmp.gather_atoms("v",1,3)
epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1");
# extract force on single atom two different ways
f = lmp.extract_atom("f",3)
print("Force on 1 atom via extract_atom: ",f[0][0])
fx = lmp.extract_variable("fx","all",1)
print("Force on 1 atom via extract_variable:",fx[0])
# use commands_string() and commands_list() to invoke more commands
strtwo = "run 10\nrun 20"
lmp.commands_string(strtwo)
cmds = ["run 10","run 20"]
lmp.commands_list(cmds)
# delete all atoms
# create_atoms() to create new ones with old coords, vels
# initial thermo should be same as step 20
natoms = lmp.get_natoms()
type = natoms*[1]
lmp.command("delete_atoms group all");
lmp.create_atoms(natoms,None,type,x,v);
lmp.command("run 10");
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
# uncomment if running in parallel via mpi4py
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#MPI.Finalize()
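# Hedged add-on (not in the original example): query the potential energy
# through LAMMPS' built-in "thermo_pe" compute after the runs above.  The
# (style=0, type=0) arguments request a global scalar; treat the exact
# signature as an assumption for this LAMMPS/python-wrapper version.
pe = lmp.extract_compute("thermo_pe", 0, 0)
print("Potential energy via extract_compute:", pe)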
| gpl-2.0 |
songfj/calibre | src/calibre/ebooks/rtf2xml/border_parse.py | 22 | 8124 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys
class BorderParse:
"""
Parse a border line and return a dictionary of attributes and values
"""
def __init__(self):
# cw<bd<bor-t-r-hi<nu<true
self.__border_dict = {
'bor-t-r-hi' : 'border-table-row-horizontal-inside',
'bor-t-r-vi' : 'border-table-row-vertical-inside',
'bor-t-r-to' : 'border-table-row-top',
'bor-t-r-le' : 'border-table-row-left',
'bor-t-r-bo' : 'border-table-row-bottom',
'bor-t-r-ri' : 'border-table-row-right',
'bor-cel-bo' : 'border-cell-bottom',
'bor-cel-to' : 'border-cell-top',
'bor-cel-le' : 'border-cell-left',
'bor-cel-ri' : 'border-cell-right',
'bor-par-bo' : 'border-paragraph-bottom',
'bor-par-to' : 'border-paragraph-top',
'bor-par-le' : 'border-paragraph-left',
'bor-par-ri' : 'border-paragraph-right',
'bor-par-bx' : 'border-paragraph-box',
'bor-for-ev' : 'border-for-every-paragraph',
'bor-outsid' : 'border-outside',
'bor-none__' : 'border',
# border type => bt
'bdr-li-wid' : 'line-width',
'bdr-sp-wid' : 'padding',
'bdr-color_' : 'color',
}
self.__border_style_dict = {
'bdr-single' : 'single',
'bdr-doubtb' : 'double-thickness-border',
'bdr-shadow' : 'shadowed-border',
'bdr-double' : 'double-border',
'bdr-dotted' : 'dotted-border',
'bdr-dashed' : 'dashed',
'bdr-hair__' : 'hairline',
'bdr-inset_' : 'inset',
'bdr-das-sm' : 'dash-small',
'bdr-dot-sm' : 'dot-dash',
'bdr-dot-do' : 'dot-dot-dash',
'bdr-outset' : 'outset',
'bdr-trippl' : 'tripple',
'bdr-thsm__' : 'thick-thin-small',
'bdr-htsm__' : 'thin-thick-small',
'bdr-hthsm_' : 'thin-thick-thin-small',
'bdr-thm___' : 'thick-thin-medium',
'bdr-htm___' : 'thin-thick-medium',
'bdr-hthm__' : 'thin-thick-thin-medium',
'bdr-thl___' : 'thick-thin-large',
'bdr-hthl__' : 'thin-thick-thin-large',
'bdr-wavy__' : 'wavy',
'bdr-d-wav_' : 'double-wavy',
'bdr-strip_' : 'striped',
'bdr-embos_' : 'emboss',
'bdr-engra_' : 'engrave',
'bdr-frame_' : 'frame',
}
def parse_border(self, line):
"""
Requires:
line -- line with border definition in it
Returns:
            a dictionary of border attributes and values for the given line
        Logic:
            split the attribute portion of the line on '|' and map each token
            through the attribute and style dictionaries
"""
border_dict = {}
border_style_dict = {}
border_style_list = []
border_type = self.__border_dict.get(line[6:16])
if not border_type:
sys.stderr.write(
'module is border_parse.py\n'
'function is parse_border\n'
'token does not have a dictionary value\n'
'token is "%s"' % line
)
return border_dict
att_line = line[20:-1]
atts = att_line.split('|')
# cw<bd<bor-cel-ri<nu<
# border has no value--should be no lines
if len(atts) == 1 and atts[0] == '':
border_dict[border_type] = 'none'
return border_dict
# border-paragraph-right
for att in atts:
values = att.split(':')
if len(values) ==2:
att = values[0]
value = values[1]
else:
value = 'true'
style_att = self.__border_style_dict.get(att)
if style_att:
att = '%s-%s' % (border_type, att)
border_style_dict[att] = value
border_style_list.append(style_att)
else:
att = self.__border_dict.get(att)
if not att:
sys.stderr.write(
'module is border_parse_def.py\n'
'function is parse_border\n'
'token does not have an att value\n'
'line is "%s"' % line
)
att = '%s-%s' % (border_type, att)
border_dict[att] = value
new_border_dict = self.__determine_styles(border_type, border_style_list)
border_dict.update(new_border_dict)
return border_dict
def __determine_styles(self, border_type, border_style_list):
new_border_dict = {}
att = '%s-style' % border_type
if 'shadowed-border' in border_style_list:
new_border_dict[att] = 'shadowed'
        elif 'engrave' in border_style_list:
new_border_dict[att] = 'engraved'
elif 'emboss' in border_style_list:
new_border_dict[att] = 'emboss'
elif 'striped' in border_style_list:
new_border_dict[att] = 'striped'
elif 'thin-thick-thin-small' in border_style_list:
new_border_dict[att] = 'thin-thick-thin-small'
elif 'thick-thin-large' in border_style_list:
new_border_dict[att] = 'thick-thin-large'
elif 'thin-thick-thin-medium' in border_style_list:
new_border_dict[att] = 'thin-thick-thin-medium'
elif 'thin-thick-medium' in border_style_list:
new_border_dict[att] = 'thin-thick-medium'
elif 'thick-thin-medium' in border_style_list:
new_border_dict[att] = 'thick-thin-medium'
        elif 'thick-thin-small' in border_style_list:
            new_border_dict[att] = 'thick-thin-small'
elif 'double-wavy' in border_style_list:
new_border_dict[att] = 'double-wavy'
elif 'dot-dot-dash' in border_style_list:
new_border_dict[att] = 'dot-dot-dash'
elif 'dot-dash' in border_style_list:
new_border_dict[att] = 'dot-dash'
elif 'dotted-border' in border_style_list:
new_border_dict[att] = 'dotted'
elif 'wavy' in border_style_list:
new_border_dict[att] = 'wavy'
elif 'dash-small' in border_style_list:
new_border_dict[att] = 'dash-small'
elif 'dashed' in border_style_list:
new_border_dict[att] = 'dashed'
elif 'frame' in border_style_list:
new_border_dict[att] = 'frame'
elif 'inset' in border_style_list:
new_border_dict[att] = 'inset'
elif 'outset' in border_style_list:
new_border_dict[att] = 'outset'
        elif 'tripple' in border_style_list:
new_border_dict[att] = 'tripple'
elif 'double-border' in border_style_list:
new_border_dict[att] = 'double'
elif 'double-thickness-border' in border_style_list:
new_border_dict[att] = 'double-thickness'
elif 'hairline' in border_style_list:
new_border_dict[att] = 'hairline'
elif 'single' in border_style_list:
new_border_dict[att] = 'single'
else:
if border_style_list:
new_border_dict[att] = border_style_list[0]
return new_border_dict
| gpl-3.0 |
n1bor/bitcoin | test/functional/feature_config_args.py | 6 | 13099 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework import util
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
self.wallet_names = []
def test_config_file_parser(self):
self.stop_node(0)
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file_path))
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
extra_args=['-dash_cli=1'],
)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('dash_conf=1\n')
with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
self.start_node(0)
self.stop_node(0)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('-dash=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
if self.is_wallet_compiled():
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
main_conf_file_path = os.path.join(self.options.tmpdir, 'node0', 'bitcoin_main.conf')
util.write_config(main_conf_file_path, n=0, chain='', extra_config='includeconf={}\n'.format(inc_conf_file_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('acceptnonstdtxn=1\n')
self.nodes[0].assert_start_raises_init_error(extra_args=["-conf={}".format(main_conf_file_path)], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file2_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('testnot.datadir=1\n')
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('[testnet]\n')
self.restart_node(0)
self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
def test_invalid_command_line_options(self):
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: No proxy server specified. Use -proxy=<ip> or -proxy=<ip:port>.',
extra_args=['-proxy'],
)
def test_log_buffer(self):
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
self.start_node(0, extra_args=['-noconnect=0'])
def test_args_log(self):
self.stop_node(0)
self.log.info('Test config args logging')
with self.nodes[0].assert_debug_log(
expected_msgs=[
'Command-line arg: addnode="some.node"',
'Command-line arg: rpcauth=****',
'Command-line arg: rpcbind=****',
'Command-line arg: rpcpassword=****',
'Command-line arg: rpcuser=****',
'Command-line arg: torpassword=****',
'Config file arg: %s="1"' % self.chain,
'Config file arg: [%s] server="1"' % self.chain,
],
unexpected_msgs=[
'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
'127.1.1.1',
'secret-rpcuser',
'secret-torpassword',
]):
self.start_node(0, extra_args=[
'-addnode=some.node',
'-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
'-rpcbind=127.1.1.1',
'-rpcpassword=',
'-rpcuser=secret-rpcuser',
'-torpassword=secret-torpassword',
])
def test_networkactive(self):
self.log.info('Test -networkactive option')
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0)
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0, extra_args=['-networkactive'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.start_node(0, extra_args=['-networkactive=1'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-networkactive=0'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-nonetworkactive'])
self.stop_node(0)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
self.start_node(0, extra_args=['-nonetworkactive=1'])
def test_seed_peers(self):
self.log.info('Test seed peers')
default_data_dir = self.nodes[0].datadir
# Only regtest has no fixed seeds. To avoid connections to random
# nodes, regtest is the only network where it is safe to enable
# -fixedseeds in tests
util.assert_equal(self.nodes[0].getblockchaininfo()['chain'],'regtest')
self.stop_node(0)
# No peers.dat exists and -dnsseed=1
# We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds
# So after 60 seconds, the node should fallback to fixed seeds (this is a slow test)
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = int(time.time())
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"0 addresses found from DNS seeds",
]):
self.start_node(0, extra_args=['-dnsseed=1', '-fixedseeds=1', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
"Adding fixed seeds as 60 seconds have passed and addrman is empty",
]):
self.nodes[0].setmocktime(start + 65)
self.stop_node(0)
# No peers.dat exists and -dnsseed=0
# We expect the node will fallback immediately to fixed seeds
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = time.time()
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
"Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n",
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1'])
assert time.time() - start < 60
self.stop_node(0)
# No peers.dat exists and dns seeds are disabled.
# We expect the node will not add fixed seeds when explicitly disabled.
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = time.time()
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
"Fixed seeds are disabled",
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0'])
assert time.time() - start < 60
self.stop_node(0)
# No peers.dat exists and -dnsseed=0, but a -addnode is provided
# We expect the node will allow 60 seconds prior to using fixed seeds
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = int(time.time())
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1', '-addnode=fakenodeaddr', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
"Adding fixed seeds as 60 seconds have passed and addrman is empty",
]):
self.nodes[0].setmocktime(start + 65)
def run_test(self):
self.test_log_buffer()
self.test_args_log()
self.test_seed_peers()
self.test_networkactive()
self.test_config_file_parser()
self.test_invalid_command_line_options()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "bitcoin.conf")
# datadir needs to be set before [chain] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file])
assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
if __name__ == '__main__':
ConfArgsTest().main()
| mit |
BonexGu/Blik2D-SDK | Blik2D/addon/opencv-3.1.0_for_blik/samples/python/lk_track.py | 5 | 3155 | #!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
Usage
-----
lk_track.py [<video_source>]
Keys
----
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import video
from common import anorm2, draw_str
from time import clock
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
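# Illustrative helper (a sketch with a made-up name; the demo below inlines
# this check in App.run()): the forward-backward consistency test, with a
# 1-pixel threshold mirroring the hard-coded "d < 1" comparison.
def fb_consistent(p0, p0r, thresh=1.0):
    '''Boolean mask of points whose back-tracked position stays within
    `thresh` pixels of where tracking started.'''
    return abs(p0 - p0r).reshape(-1, 2).max(-1) < thresh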
class App:
def __init__(self, video_src):
self.track_len = 10
self.detect_interval = 5
self.tracks = []
self.cam = video.create_capture(video_src)
self.frame_idx = 0
def run(self):
while True:
ret, frame = self.cam.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
vis = frame.copy()
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
self.tracks = new_tracks
cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
if self.frame_idx % self.detect_interval == 0:
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([(x, y)])
self.frame_idx += 1
self.prev_gray = frame_gray
cv2.imshow('lk_track', vis)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
break
def main():
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
print(__doc__)
App(video_src).run()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| mit |
ContinuumIO/odo | odo/backends/tests/test_text.py | 9 | 1610 | from __future__ import absolute_import, division, print_function
from odo.backends.text import (TextFile, resource, convert, discover, append,
drop, chunks, Temp)
from odo.utils import tmpfile, filetexts, filetext
from datashape import dshape
import gzip
import os
def test_resource():
assert isinstance(resource('foo.txt'), TextFile)
assert isinstance(resource('foo.log.gz'), TextFile)
assert isinstance(resource('/path/to/foo.log'), TextFile)
def test_open():
r = resource('foo.log.gz')
assert r.open == gzip.open
def test_convert():
with filetext('Hello\nWorld') as fn:
assert convert(list, TextFile(fn)) == ['Hello\n', 'World']
def test_append():
with tmpfile('log') as fn:
t = TextFile(fn)
append(t, ['Hello', 'World'])
assert os.path.exists(fn)
with open(fn) as f:
assert list(map(str.strip, f.readlines())) == ['Hello', 'World']
def test_discover():
assert discover(TextFile('')) == dshape('var * string')
def test_drop():
with filetext('hello\nworld') as fn:
t = TextFile(fn)
assert os.path.exists(fn)
drop(t)
assert not os.path.exists(fn)
def test_chunks_textfile():
with filetexts({'a1.log': 'Hello\nWorld', 'a2.log': 'Hola\nMundo'}) as fns:
logs = chunks(TextFile)(list(map(TextFile, fns)))
assert set(map(str.strip, convert(list, logs))) == \
set(['Hello', 'World', 'Hola', 'Mundo'])
def test_temp():
t = convert(Temp(TextFile), [1, 2, 3])
assert [int(line.strip()) for line in convert(list, t)] == [1, 2, 3]
| bsd-3-clause |
Raghavan-Lab/LabUtilities | split_fasta.py | 1 | 2647 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Splits a multi-sequence fasta file into multiple files, each containing n reads.
Examples:
Split a very large fasta file into smaller files containing 10,000 reads
python split_fasta.py -f large_amino_acid_file.faa -n 10000
NOTE: This script places the output files in the current directory. It's recommended that
you create an empty directory to work in.
"""
import argparse
from collections import namedtuple
from Bio import SeqIO
def get_options():
"""
Returns command line arguments as an Argparse object
:return: command line options
"""
parser = argparse.ArgumentParser()
parser.add_argument('-n', help='Number of reads per output file', default=1000)
parser.add_argument('-f', help='Fasta file to split')
return parser.parse_args()
def save_fasta_slice(slice, start_record, end_record):
"""
Saves a list of namedtuples as a fasta file
:param slice: list of reads to save
    :type slice: list
    :param start_record: first record in slice
    :type start_record: int
    :param end_record: last record in slice
    :type end_record: int
"""
output_file = 'sequences_{}_to_{}.fasta'.format(start_record, end_record)
with open(output_file, 'w') as fasta_slice:
for sequence in slice:
# output is in fasta format
fasta_slice.write('{id}\n{seq}\n\n'.format(id=sequence.seq_id,
seq=sequence.seq))
def main():
"""
Reads a fasta file into a list of tuples (sequence id, nucleotide/amino acid sequence) and
saves n sized batches into output files.
"""
# get command line arguments
options = get_options()
# Keep track of the read numbers for each chunk
first_seq = 1
last_seq = 0
sequence_buffer = []
FastaRead = namedtuple('FastaRead', ['seq_id', 'seq'])
for record in SeqIO.parse(options.f, "fasta"):
last_seq += 1
sequence_buffer.append(FastaRead('>{}'.format(record.description), record.seq))
# Save a file if we have enough reads
if len(sequence_buffer) >= int(options.n):
# flush the buffer
save_fasta_slice(sequence_buffer, first_seq, last_seq)
print('\tsaved records: {} to {}'.format(first_seq, last_seq))
# Reset buffer and starting record number
sequence_buffer = []
first_seq = last_seq + 1
# save remaining bits
save_fasta_slice(sequence_buffer, first_seq, last_seq)
if __name__ == '__main__':
print('Splitting Fasta file...')
main()
print('Job Complete')
| gpl-2.0 |
drpngx/tensorflow | tensorflow/python/kernel_tests/conv2d_transpose_test.py | 15 | 12105 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.test_session():
strides = [1, 1, 1, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 6, 4, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
# We count the number of cells being added at the locations in the output.
# At the center, #cells=kernel_height * kernel_width
# At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
# At the borders, #cells=ceil(kernel_height/2)*kernel_width or
# kernel_height * ceil(kernel_width/2)
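      # Worked numbers for this case: a 3x3 all-ones filter over a depth-3
      # all-ones input overlaps 4 taps at corners (4 * 3 = 12), 6 taps on
      # non-corner edges (18) and 9 taps in the interior (27), which is what
      # the 4*3.0 base plus the +2*3.0 / +5*3.0 adjustments below produce.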
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[1] - 1
w_in = w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeSame(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 12, 8, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeValid(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 13, 9, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[2] - pad):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > pad and h < y_shape[
1] - 1 - pad
w_in = w % strides[2] == 0 and w > pad and w < y_shape[
2] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, h, w, k] = target
# copy values in the border
cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 6, 4, 3]
f_shape = [3, 3, 2, 3]
y_shape = [2, 12, 8, 2]
strides = [1, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 1, 1]
        # Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 6, 4]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeSameNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 12, 8]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeValidNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 13, 9]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[3] - pad):
for h in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > pad and h < y_shape[
2] - 1 - pad
w_in = w % strides[3] == 0 and w > pad and w < y_shape[
3] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, k, h, w] = target
# copy values in the border
cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
cache_values[n, k, -1, :] = cache_values[n, k, -2, :]
self.assertAllClose(cache_values, value)
@test_util.enable_c_shapes
def testConv2DTransposeShapeInference(self):
# Test case for 8972
initializer = random_ops.truncated_normal(
[3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
f = variable_scope.get_variable("f", initializer=initializer)
f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
output = nn_ops.conv2d_transpose(
x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual(output.get_shape().as_list(), [3, 10, 5, 5])
if __name__ == "__main__":
test.main()
| apache-2.0 |
AospPlus/android_kernel_htc_enrc2b-old | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
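        # find_time_slice() above is a plain binary search over the
        # time-ordered slices; a return value of -1 means the timestamp does
        # not fall inside any recorded slice.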
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
edxnercel/edx-platform | lms/djangoapps/courseware/tests/test_footer.py | 63 | 2297 | """
Tests related to the basic footer-switching based off SITE_NAME to ensure
edx.org uses an edx footer but other instances use an Open edX footer.
"""
from mock import patch
from nose.plugins.attrib import attr
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
@attr('shard_1')
class TestFooter(TestCase):
SOCIAL_MEDIA_NAMES = [
"facebook",
"google_plus",
"twitter",
"linkedin",
"tumblr",
"meetup",
"reddit",
"youtube",
]
SOCIAL_MEDIA_URLS = {
"facebook": "http://www.facebook.com/",
"google_plus": "https://plus.google.com/",
"twitter": "https://twitter.com/",
"linkedin": "http://www.linkedin.com/",
"tumblr": "http://www.tumblr.com/",
"meetup": "http://www.meetup.com/",
"reddit": "http://www.reddit.com/",
"youtube": "https://www.youtube.com/"
}
def test_edx_footer(self):
"""
Verify that the homepage, when accessed at edx.org, has the edX footer
"""
with patch.dict('django.conf.settings.FEATURES', {"IS_EDX_DOMAIN": True}):
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'footer-edx-v3')
def test_openedx_footer(self):
"""
Verify that the homepage, when accessed at something other than
edx.org, has the Open edX footer
"""
with patch.dict('django.conf.settings.FEATURES', {"IS_EDX_DOMAIN": False}):
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'footer-openedx')
@patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': True})
@override_settings(
SOCIAL_MEDIA_FOOTER_NAMES=SOCIAL_MEDIA_NAMES,
SOCIAL_MEDIA_FOOTER_URLS=SOCIAL_MEDIA_URLS
)
def test_edx_footer_social_links(self):
resp = self.client.get('/')
for name, url in self.SOCIAL_MEDIA_URLS.iteritems():
self.assertContains(resp, url)
self.assertContains(resp, settings.SOCIAL_MEDIA_FOOTER_DISPLAY[name]['title'])
self.assertContains(resp, settings.SOCIAL_MEDIA_FOOTER_DISPLAY[name]['icon'])
| agpl-3.0 |
johan--/Quiz-Program | vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/gyp/test/builddir/gyptest-default.py | 74 | 2670 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
# Android does not support setting the build directory.
test = TestGyp.TestGyp(formats=['!make', '!ninja', '!android'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
if test.format == 'msvs':
if test.uses_msbuild:
test.must_contain('src/prog1.vcxproj',
'<OutDir>..\\builddir\\Default\\</OutDir>')
else:
test.must_contain('src/prog1.vcproj',
'OutputDirectory="..\\builddir\\Default\\"')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
dir = 'relocate/builddir/Default/'
test.run(program=test.workpath(dir + prog), stdout=expect)
run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
| cc0-1.0 |
hsduk/lets-encrypt-preview | acme/acme/jose/jwk_test.py | 32 | 7207 | """Tests for acme.jose.jwk."""
import binascii
import unittest
from acme import test_util
from acme.jose import errors
from acme.jose import json_util
from acme.jose import util
DSA_PEM = test_util.load_vector('dsa512_key.pem')
RSA256_KEY = test_util.load_rsa_private_key('rsa256_key.pem')
RSA512_KEY = test_util.load_rsa_private_key('rsa512_key.pem')
class JWKTest(unittest.TestCase):
"""Tests for acme.jose.jwk.JWK."""
def test_load(self):
from acme.jose.jwk import JWK
self.assertRaises(errors.Error, JWK.load, DSA_PEM)
def test_load_subclass_wrong_type(self):
from acme.jose.jwk import JWKRSA
self.assertRaises(errors.Error, JWKRSA.load, DSA_PEM)
class JWKTestBaseMixin(object):
"""Mixin test for JWK subclass tests."""
thumbprint = NotImplemented
def test_thumbprint_private(self):
self.assertEqual(self.thumbprint, self.jwk.thumbprint())
def test_thumbprint_public(self):
self.assertEqual(self.thumbprint, self.jwk.public_key().thumbprint())
class JWKOctTest(unittest.TestCase, JWKTestBaseMixin):
"""Tests for acme.jose.jwk.JWKOct."""
thumbprint = (b"\xf3\xe7\xbe\xa8`\xd2\xdap\xe9}\x9c\xce>"
b"\xd0\xfcI\xbe\xcd\x92'\xd4o\x0e\xf41\xea"
b"\x8e(\x8a\xb2i\x1c")
def setUp(self):
from acme.jose.jwk import JWKOct
self.jwk = JWKOct(key=b'foo')
self.jobj = {'kty': 'oct', 'k': json_util.encode_b64jose(b'foo')}
def test_to_partial_json(self):
self.assertEqual(self.jwk.to_partial_json(), self.jobj)
def test_from_json(self):
from acme.jose.jwk import JWKOct
self.assertEqual(self.jwk, JWKOct.from_json(self.jobj))
def test_from_json_hashable(self):
from acme.jose.jwk import JWKOct
hash(JWKOct.from_json(self.jobj))
def test_load(self):
from acme.jose.jwk import JWKOct
self.assertEqual(self.jwk, JWKOct.load(b'foo'))
def test_public_key(self):
self.assertTrue(self.jwk.public_key() is self.jwk)
class JWKRSATest(unittest.TestCase, JWKTestBaseMixin):
"""Tests for acme.jose.jwk.JWKRSA."""
# pylint: disable=too-many-instance-attributes
thumbprint = (b'\x83K\xdc#3\x98\xca\x98\xed\xcb\x80\x80<\x0c'
b'\xf0\x95\xb9H\xb2*l\xbd$\xe5&|O\x91\xd4 \xb0Y')
def setUp(self):
from acme.jose.jwk import JWKRSA
self.jwk256 = JWKRSA(key=RSA256_KEY.public_key())
self.jwk256json = {
'kty': 'RSA',
'e': 'AQAB',
'n': 'm2Fylv-Uz7trgTW8EBHP3FQSMeZs2GNQ6VRo1sIVJEk',
}
# pylint: disable=protected-access
self.jwk256_not_comparable = JWKRSA(
key=RSA256_KEY.public_key()._wrapped)
self.jwk512 = JWKRSA(key=RSA512_KEY.public_key())
self.jwk512json = {
'kty': 'RSA',
'e': 'AQAB',
'n': 'rHVztFHtH92ucFJD_N_HW9AsdRsUuHUBBBDlHwNlRd3fp5'
'80rv2-6QWE30cWgdmJS86ObRz6lUTor4R0T-3C5Q',
}
self.private = JWKRSA(key=RSA256_KEY)
self.private_json_small = self.jwk256json.copy()
self.private_json_small['d'] = (
'lPQED_EPTV0UIBfNI3KP2d9Jlrc2mrMllmf946bu-CE')
self.private_json = self.jwk256json.copy()
self.private_json.update({
'd': 'lPQED_EPTV0UIBfNI3KP2d9Jlrc2mrMllmf946bu-CE',
'p': 'zUVNZn4lLLBD1R6NE8TKNQ',
'q': 'wcfKfc7kl5jfqXArCRSURQ',
'dp': 'CWJFq43QvT5Bm5iN8n1okQ',
'dq': 'bHh2u7etM8LKKCF2pY2UdQ',
'qi': 'oi45cEkbVoJjAbnQpFY87Q',
})
self.jwk = self.private
def test_init_auto_comparable(self):
self.assertTrue(isinstance(
self.jwk256_not_comparable.key, util.ComparableRSAKey))
self.assertEqual(self.jwk256, self.jwk256_not_comparable)
def test_encode_param_zero(self):
from acme.jose.jwk import JWKRSA
# pylint: disable=protected-access
# TODO: move encode/decode _param to separate class
self.assertEqual('AA', JWKRSA._encode_param(0))
def test_equals(self):
self.assertEqual(self.jwk256, self.jwk256)
self.assertEqual(self.jwk512, self.jwk512)
def test_not_equals(self):
self.assertNotEqual(self.jwk256, self.jwk512)
self.assertNotEqual(self.jwk512, self.jwk256)
def test_load(self):
from acme.jose.jwk import JWKRSA
self.assertEqual(self.private, JWKRSA.load(
test_util.load_vector('rsa256_key.pem')))
def test_public_key(self):
self.assertEqual(self.jwk256, self.private.public_key())
def test_to_partial_json(self):
self.assertEqual(self.jwk256.to_partial_json(), self.jwk256json)
self.assertEqual(self.jwk512.to_partial_json(), self.jwk512json)
self.assertEqual(self.private.to_partial_json(), self.private_json)
def test_from_json(self):
from acme.jose.jwk import JWK
self.assertEqual(
self.jwk256, JWK.from_json(self.jwk256json))
self.assertEqual(
self.jwk512, JWK.from_json(self.jwk512json))
self.assertEqual(self.private, JWK.from_json(self.private_json))
def test_from_json_private_small(self):
from acme.jose.jwk import JWK
self.assertEqual(self.private, JWK.from_json(self.private_json_small))
def test_from_json_missing_one_additional(self):
from acme.jose.jwk import JWK
del self.private_json['q']
self.assertRaises(errors.Error, JWK.from_json, self.private_json)
def test_from_json_hashable(self):
from acme.jose.jwk import JWK
hash(JWK.from_json(self.jwk256json))
def test_from_json_non_schema_errors(self):
# valid against schema, but still failing
from acme.jose.jwk import JWK
self.assertRaises(errors.DeserializationError, JWK.from_json,
{'kty': 'RSA', 'e': 'AQAB', 'n': ''})
self.assertRaises(errors.DeserializationError, JWK.from_json,
{'kty': 'RSA', 'e': 'AQAB', 'n': '1'})
def test_thumbprint_go_jose(self):
# https://github.com/square/go-jose/blob/4ddd71883fa547d37fbf598071f04512d8bafee3/jwk.go#L155
# https://github.com/square/go-jose/blob/4ddd71883fa547d37fbf598071f04512d8bafee3/jwk_test.go#L331-L344
# https://github.com/square/go-jose/blob/4ddd71883fa547d37fbf598071f04512d8bafee3/jwk_test.go#L384
from acme.jose.jwk import JWKRSA
key = JWKRSA.json_loads("""{
"kty": "RSA",
"kid": "[email protected]",
"use": "sig",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB"
}""")
self.assertEqual(
binascii.hexlify(key.thumbprint()),
b"f63838e96077ad1fc01c3f8405774dedc0641f558ebb4b40dccf5f9b6d66a932")
if __name__ == '__main__':
unittest.main() # pragma: no cover
| apache-2.0 |
cloud9UG/odoo | addons/account/test/test_parent_structure.py | 432 | 2108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO: move this into a YAML test with the !python tag
#
import xmlrpclib
DB = 'training3'
USERID = 1
USERPASS = 'admin'
sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % ('localhost',8069))
ids = sock.execute(DB, USERID, USERPASS, 'account.account', 'search', [], {})
account_lists = sock.execute(DB, USERID, USERPASS, 'account.account', 'read', ids, ['parent_id','parent_left','parent_right'])
accounts = dict(map(lambda x: (x['id'],x), account_lists))
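# The assertions below check the nested-set invariants of the account tree:
# each child's (parent_left, parent_right) interval must lie strictly inside
# its parent's interval, and the intervals of two accounts may only nest or
# be disjoint -- they must never partially overlap.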
for a in account_lists:
if a['parent_id']:
assert a['parent_left'] > accounts[a['parent_id'][0]]['parent_left']
assert a['parent_right'] < accounts[a['parent_id'][0]]['parent_right']
assert a['parent_left'] < a['parent_right']
for a2 in account_lists:
assert not ((a2['parent_right']>a['parent_left']) and
(a2['parent_left']<a['parent_left']) and
(a2['parent_right']<a['parent_right']))
if a2['parent_id']==a['id']:
assert (a2['parent_left']>a['parent_left']) and (a2['parent_right']<a['parent_right'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
osmc/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.services/resources/lib/osmcservices/osmc/osmc_setting.py | 2 | 1337 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2020 OSMC (KodeKarnage)
This file is part of script.module.osmcsetting.services
SPDX-License-Identifier: GPL-2.0-or-later
See LICENSES/GPL-2.0-or-later for more information.
"""
import os
import xbmcgui
from osmccommon import osmc_setting
from osmccommon.osmc_logging import StandardLogger
from ..services_gui import ServiceSelectionGui
addon_id = "script.module.osmcsetting.services"
log = StandardLogger(addon_id, os.path.basename(__file__)).log
class OSMCSettingClass(osmc_setting.OSMCSettingClass):
def __init__(self):
super(OSMCSettingClass, self).__init__()
self.addon_id = addon_id
self.short_name = 'Services'
self.short_name_i18n = 32058
self.description = 'Control OSMC services'
self.description_i18n = 32059
self.setting_data_method = {
'none': {
'setting_value': '',
}
}
self.reboot_required = False
def run(self):
xml = "ServiceBrowser_720OSMC.xml" \
if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' \
else "ServiceBrowser_OSMC.xml"
creation = ServiceSelectionGui(xml, self.me.getAddonInfo('path'), 'Default', addon=self.me)
creation.doModal()
del creation
| gpl-2.0 |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/django/core/checks/security/sessions.py | 57 | 2781 | from django.conf import settings
from .. import Tags, Warning, register
from ..utils import patch_middleware_message
def add_session_cookie_message(message):
return message + (
" Using a secure-only session cookie makes it more difficult for "
"network traffic sniffers to hijack user sessions."
)
W010 = Warning(
add_session_cookie_message(
"You have 'django.contrib.sessions' in your INSTALLED_APPS, "
"but you have not set SESSION_COOKIE_SECURE to True."
),
id='security.W010',
)
W011 = Warning(
add_session_cookie_message(
"You have 'django.contrib.sessions.middleware.SessionMiddleware' "
"in your MIDDLEWARE, but you have not set "
"SESSION_COOKIE_SECURE to True."
),
id='security.W011',
)
W012 = Warning(
add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
id='security.W012',
)
def add_httponly_message(message):
return message + (
" Using an HttpOnly session cookie makes it more difficult for "
"cross-site scripting attacks to hijack user sessions."
)
W013 = Warning(
add_httponly_message(
"You have 'django.contrib.sessions' in your INSTALLED_APPS, "
"but you have not set SESSION_COOKIE_HTTPONLY to True.",
),
id='security.W013',
)
W014 = Warning(
add_httponly_message(
"You have 'django.contrib.sessions.middleware.SessionMiddleware' "
"in your MIDDLEWARE, but you have not set "
"SESSION_COOKIE_HTTPONLY to True."
),
id='security.W014',
)
W015 = Warning(
add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
errors = []
if not settings.SESSION_COOKIE_SECURE:
if _session_app():
errors.append(W010)
if _session_middleware():
errors.append(patch_middleware_message(W011))
if len(errors) > 1:
errors = [W012]
return errors
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
errors = []
if not settings.SESSION_COOKIE_HTTPONLY:
if _session_app():
errors.append(W013)
if _session_middleware():
errors.append(patch_middleware_message(W014))
if len(errors) > 1:
errors = [W015]
return errors
def _session_middleware():
return ("django.contrib.sessions.middleware.SessionMiddleware" in settings.MIDDLEWARE_CLASSES or
settings.MIDDLEWARE and "django.contrib.sessions.middleware.SessionMiddleware" in settings.MIDDLEWARE)
def _session_app():
return "django.contrib.sessions" in settings.INSTALLED_APPS
| mit |
da4089/wireshark | doc/extcap_example.py | 5 | 10476 | #!/usr/bin/env python
# Copyright 2014 Roland Knall <rknall [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This is a generic example, which produces pcap packages every n seconds, and
is configurable via extcap options.
@note
{
To use this script on Windows, please generate an extcap_example.bat inside
the extcap folder, with the following content:
-------
@echo off
<Path to python interpreter> <Path to script file> %*
-------
Windows is not able to execute Python scripts directly, which also goes for all
other script-based formats besides VBScript.
}
"""
import os
import sys
import signal
import re
import argparse
import time
import struct
import binascii
from threading import Thread
ERROR_USAGE = 0
ERROR_ARG = 1
ERROR_INTERFACE = 2
ERROR_FIFO = 3
doExit = False
globalinterface = 0
def signalHandler(signal, frame):
global doExit
doExit = True
#### EXTCAP FUNCTIONALITY
"""@brief Extcap configuration
This method prints the extcap configuration, which will be picked up by the
interface in Wireshark to present an interface-specific configuration for
this extcap plugin
"""
def extcap_config(interface):
args = []
values = []
args.append ( (0, '--delay', 'Time delay', 'Time delay between packages', 'integer', '{range=1,15}{default=5}') )
args.append ( (1, '--message', 'Message', 'Package message content', 'string', '{required=true}') )
args.append ( (2, '--verify', 'Verify', 'Verify package content', 'boolflag', '{default=yes}') )
args.append ( (3, '--remote', 'Remote Channel', 'Remote Channel Selector', 'selector', ''))
args.append ( (4, '--fake_ip', 'Fake IP Address', 'Use this ip address as sender', 'string', '{save=false}{validation=\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\b}'))
args.append ( (5, '--ltest', 'Long Test', 'Long Test Value', 'long', '{default=123123123123123123}'))
args.append ( (6, '--d1test', 'Double 1 Test', 'Long Test Value', 'double', '{default=123.456}'))
args.append ( (7, '--d2test', 'Double 2 Test', 'Long Test Value', 'double', '{default= 123,456}'))
args.append ( (8, '--password', 'Password', 'Package message password', 'password', '') )
values.append ( (3, "if1", "Remote1", "true" ) )
values.append ( (3, "if2", "Remote2", "false" ) )
for arg in args:
print ("arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s" % arg)
for value in values:
print ("value {arg=%d}{value=%s}{display=%s}{default=%s}" % value)
def extcap_interfaces():
print ("extcap {version=1.0}")
print ("interface {value=example1}{display=Example interface usage for extcap}")
def extcap_dlts(interface):
if ( interface == '1' ):
print ("dlt {number=147}{name=USER0}{display=Demo Implementation for Extcap}")
"""
### FAKE DATA GENERATOR
Extcap capture routine
This routine simulates a capture by any kind of user defined device. The parameters
are user specified and must be handled by the extcap.
The data captured inside this routine is fake, so change this routine to present
your own input data, or call your own capture program via Popen for example. See
for more details.
"""
def unsigned(n):
return int(n) & 0xFFFFFFFF
def append_bytes(ba, blist):
for c in range(0, len(blist)):
ba.append(blist[c])
return ba
def pcap_fake_header():
header = bytearray()
header = append_bytes(header, struct.pack('<L', int ('a1b2c3d4', 16) ))
header = append_bytes(header, struct.pack('<H', unsigned(2)) ) # Pcap Major Version
header = append_bytes(header, struct.pack('<H', unsigned(4)) ) # Pcap Minor Version
header = append_bytes(header, struct.pack('<I', int(0))) # Timezone
header = append_bytes(header, struct.pack('<I', int(0))) # Accuracy of timestamps
header = append_bytes(header, struct.pack('<L', int ('0000ffff', 16) )) # Max Length of capture frame
header = append_bytes(header, struct.pack('<L', unsigned(1))) # Ethernet
return header
# Calculates and returns the IP checksum based on the given IP Header
def ip_checksum(iph):
#split into bytes
words = splitN(''.join(iph.split()),4)
csum = 0;
for word in words:
csum += int(word, base=16)
csum += (csum >> 16)
csum = csum & 0xFFFF ^ 0xFFFF
return csum
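# Editor's note: ip_checksum() above calls a helper splitN() that is not defined
# anywhere in this file (and ip_checksum itself is never invoked here). A minimal
# sketch of what splitN is assumed to do -- split a string into chunks of n
# characters -- is given below; the real helper may differ.
def splitN(inputstr, blocksize):
    return [inputstr[i:i + blocksize] for i in range(0, len(inputstr), blocksize)]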
def pcap_fake_package ( message, fake_ip ):
pcap = bytearray()
#length = 14 bytes [ eth ] + 20 bytes [ ip ] + messagelength
caplength = len(message) + 14 + 20
timestamp = int(time.time())
pcap = append_bytes(pcap, struct.pack('<L', unsigned(timestamp) ) ) # timestamp seconds
pcap = append_bytes(pcap, struct.pack('<L', 0x00 ) ) # timestamp nanoseconds
pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength) ) ) # length captured
pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength) ) ) # length in frame
# ETH
pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
pcap = append_bytes(pcap, struct.pack('<h', unsigned(8) )) # protocol (ip)
# IP
pcap = append_bytes(pcap, struct.pack('b', int ( '45', 16) )) # IP version
pcap = append_bytes(pcap, struct.pack('b', int ( '0', 16) )) # DSCP/ECN (TOS) byte
pcap = append_bytes(pcap, struct.pack('>H', unsigned(len(message)+20) )) # length of data + payload
pcap = append_bytes(pcap, struct.pack('<H', int ( '0', 16) )) # Identification
pcap = append_bytes(pcap, struct.pack('b', int ( '40', 16) )) # Don't fragment
pcap = append_bytes(pcap, struct.pack('b', int ( '0', 16) )) # Fragment Offset
pcap = append_bytes(pcap, struct.pack('b', int ( '40', 16) )) # TTL (64)
pcap = append_bytes(pcap, struct.pack('B', 0xFE )) # Protocol (0xFE = reserved for experimentation/testing)
pcap = append_bytes(pcap, struct.pack('<H', int ( '0000', 16) )) # Checksum
parts = fake_ip.split('.')
ipadr = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
pcap = append_bytes(pcap, struct.pack('>L', ipadr )) # Source IP
pcap = append_bytes(pcap, struct.pack('>L', int ( '7F000001', 16) )) # Dest IP
pcap = append_bytes(pcap, message)
return pcap
def extcap_capture(interface, fifo, delay, verify, message, remote, fake_ip):
global doExit
signal.signal(signal.SIGINT, signalHandler)
signal.signal(signal.SIGTERM , signalHandler)
tdelay = delay if delay != 0 else 5
try:
os.stat(fifo)
except OSError:
doExit = True
print ( "Fifo does not exist, exiting!" )
fh = open(fifo, 'w+b', 0 )
fh.write (pcap_fake_header())
while doExit == False:
out = str( "%s|%04X%s|%s" % ( remote.strip(), len(message), message, verify ) )
try:
fh.write (pcap_fake_package(out, fake_ip))
time.sleep(tdelay)
except IOError:
doExit = True
fh.close()
####
def usage():
print ( "Usage: %s <--extcap-interfaces | --extcap-dlts | --extcap-interface | --extcap-config | --capture | --extcap-capture-filter | --fifo>" % sys.argv[0] )
if __name__ == '__main__':
interface = ""
# Capture options
delay = 0
message = ""
fake_ip = ""
parser = argparse.ArgumentParser(
prog="Extcap Example",
description="Extcap example program for python"
)
# Extcap Arguments
parser.add_argument("--capture", help="Start the capture routine", action="store_true" )
parser.add_argument("--extcap-interfaces", help="Provide a list of interfaces to capture from", action="store_true")
parser.add_argument("--extcap-interface", help="Provide the interface to capture from")
parser.add_argument("--extcap-dlts", help="Provide a list of dlts for the given interface", action="store_true")
parser.add_argument("--extcap-config", help="Provide a list of configurations for the given interface", action="store_true")
parser.add_argument("--extcap-capture-filter", help="Used together with capture to provide a capture filter")
parser.add_argument("--fifo", help="Use together with capture to provide the fifo to dump data to")
# Interface Arguments
parser.add_argument("--verify", help="Demonstrates a verification bool flag", action="store_true" )
parser.add_argument("--delay", help="Demonstrates an integer variable", type=int, default=0, choices=[0, 1, 2, 3, 4, 5] )
parser.add_argument("--remote", help="Demonstrates a selector choice", default="if1", choices=["if1", "if2"] )
parser.add_argument("--message", help="Demonstrates string variable", nargs='?', default="" )
parser.add_argument("--fake_ip", help="Add a fake sender IP adress", nargs='?', default="127.0.0.1" )
args, unknown = parser.parse_known_args()
if ( len(sys.argv) <= 1 ):
parser.exit("No arguments given!")
if ( args.extcap_interfaces == False and args.extcap_interface == None ):
parser.exit("An interface must be provided or the selection must be displayed")
if ( args.extcap_interfaces == True or args.extcap_interface == None ):
extcap_interfaces()
sys.exit(0)
if ( len(unknown) > 1 ):
print("Extcap Example %d unknown arguments given" % len(unknown) )
m = re.match ( 'example(\d+)', args.extcap_interface )
if not m:
sys.exit(ERROR_INTERFACE)
interface = m.group(1)
message = args.message
if ( args.message == None or len(args.message) == 0 ):
message = "Extcap Test"
fake_ip = args.fake_ip
if ( args.fake_ip == None or len(args.fake_ip) < 7 or len(args.fake_ip.split('.')) != 4 ):
fake_ip = "127.0.0.1"
if args.extcap_config:
extcap_config(interface)
elif args.extcap_dlts:
extcap_dlts(interface)
elif args.capture:
if args.fifo is None:
sys.exit(ERROR_FIFO)
extcap_capture(interface, args.fifo, args.delay, args.verify, message, args.remote, fake_ip)
else:
usage()
sys.exit(ERROR_USAGE)
| gpl-2.0 |
Skype4Py/Skype4Py | Skype4Py/lang/ru.py | 23 | 19299 | apiAttachAvailable = u'\u0412\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0441\u043e\u0435\u0434\u0438\u043d\u0435\u043d\u0438\u0435 \u0447\u0435\u0440\u0435\u0437 \u0418\u041f\u041f'
apiAttachNotAvailable = u'\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u0435\u043d'
apiAttachPendingAuthorization = u'\u041e\u0436\u0438\u0434\u0430\u043d\u0438\u0435 \u0430\u0432\u0442\u043e\u0440\u0438\u0437\u0430\u0446\u0438\u0438'
apiAttachRefused = u'\u041e\u0442\u043a\u0430\u0437'
apiAttachSuccess = u'\u0423\u0434\u0430\u043b\u043e\u0441\u044c!'
apiAttachUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
budDeletedFriend = u'\u0423\u0434\u0430\u043b\u0435\u043d \u0438\u0437 \u0441\u043f\u0438\u0441\u043a\u0430 \u0434\u0440\u0443\u0437\u0435\u0439'
budFriend = u'\u0414\u0440\u0443\u0433'
budNeverBeenFriend = u'\u041d\u0438\u043a\u043e\u0433\u0434\u0430 \u043d\u0435 \u0431\u044b\u043b \u0432 \u0441\u043f\u0438\u0441\u043a\u0435 \u0434\u0440\u0443\u0437\u0435\u0439'
budPendingAuthorization = u'\u041e\u0436\u0438\u0434\u0430\u043d\u0438\u0435 \u0430\u0432\u0442\u043e\u0440\u0438\u0437\u0430\u0446\u0438\u0438'
budUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cfrBlockedByRecipient = u'\u0417\u0432\u043e\u043d\u043e\u043a \u0437\u0430\u0431\u043b\u043e\u043a\u0438\u0440\u043e\u0432\u0430\u043d \u043f\u043e\u043b\u0443\u0447\u0430\u0442\u0435\u043b\u0435\u043c'
cfrMiscError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0441\u043c\u0435\u0448\u0430\u043d\u043d\u043e\u0433\u043e \u0442\u0438\u043f\u0430'
cfrNoCommonCodec = u'\u041d\u0435\u0442 \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e\u0433\u043e \u043a\u043e\u0434\u0435\u043a\u0430'
cfrNoProxyFound = u'\u041f\u0440\u043e\u043a\u0441\u0438 \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d'
cfrNotAuthorizedByRecipient = u'\u0422\u0435\u043a\u0443\u0449\u0438\u0439 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u0430\u0432\u0442\u043e\u0440\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u043d \u043f\u043e\u043b\u0443\u0447\u0430\u0442\u0435\u043b\u0435\u043c'
cfrRecipientNotFriend = u'\u041f\u043e\u043b\u0443\u0447\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u0434\u0440\u0443\u0433'
cfrRemoteDeviceError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0437\u0432\u0443\u043a\u0430 \u0443 \u0430\u0431\u043e\u043d\u0435\u043d\u0442\u0430'
cfrSessionTerminated = u'\u0421\u0432\u044f\u0437\u044c \u0437\u0430\u043a\u043e\u043d\u0447\u0435\u043d\u0430'
cfrSoundIOError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0437\u0432\u0443\u043a\u0430'
cfrSoundRecordingError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0437\u0430\u043f\u0438\u0441\u0438 \u0437\u0432\u0443\u043a\u0430'
cfrUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cfrUserDoesNotExist = u'\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c/\u043d\u043e\u043c\u0435\u0440 \u043d\u0435 \u0441\u0443\u0449\u0435\u0441\u0442\u0432\u0443\u0435\u0442'
cfrUserIsOffline = u'\u041e\u043d/\u041e\u043d\u0430 \u043d\u0435 \u0432 \u0441\u0435\u0442\u0438'
chsAllCalls = u'\u0423\u0441\u0442\u0430\u0440\u0435\u0432\u0448\u0430\u044f \u0432\u0435\u0440\u0441\u0438\u044f \u0434\u0438\u0430\u043b\u043e\u0433\u0430'
chsDialog = u'\u0414\u0438\u0430\u043b\u043e\u0433'
chsIncomingCalls = u'\u041e\u0436\u0438\u0434\u0430\u0435\u0442\u0441\u044f \u043f\u0440\u0438\u043d\u044f\u0442\u0438\u0435 \u043f\u0440\u0438\u0433\u043b\u0430\u0448\u0435\u043d\u0438\u044f \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u0438\u043c\u0438 \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u0430\u043c\u0438'
chsLegacyDialog = u'\u0423\u0441\u0442\u0430\u0440\u0435\u0432\u0448\u0430\u044f \u0432\u0435\u0440\u0441\u0438\u044f \u0434\u0438\u0430\u043b\u043e\u0433\u0430'
chsMissedCalls = u'\u0414\u0438\u0430\u043b\u043e\u0433'
chsMultiNeedAccept = u'\u041e\u0436\u0438\u0434\u0430\u0435\u0442\u0441\u044f \u043f\u0440\u0438\u043d\u044f\u0442\u0438\u0435 \u043f\u0440\u0438\u0433\u043b\u0430\u0448\u0435\u043d\u0438\u044f \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u0438\u043c\u0438 \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u0430\u043c\u0438'
chsMultiSubscribed = u'\u041d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u043e\u0432 \u0432\u043e\u0448\u043b\u043e \u0432 \u0447\u0430\u0442'
chsOutgoingCalls = u'\u041d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u043e\u0432 \u0432\u043e\u0448\u043b\u043e \u0432 \u0447\u0430\u0442'
chsUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
chsUnsubscribed = u'\u041d\u0435 \u044f\u0432\u043b\u044f\u0435\u0442\u0441\u044f \u0430\u0431\u043e\u043d\u0435\u043d\u0442\u043e\u043c'
clsBusy = u'\u0417\u0430\u043d\u044f\u0442'
clsCancelled = u'\u041e\u0442\u043c\u0435\u043d\u0438\u0442\u044c'
clsEarlyMedia = u'\u041f\u0440\u043e\u0438\u0433\u0440\u044b\u0432\u0430\u043d\u0438\u0435 \u043f\u0440\u0435\u0434\u0432\u0430\u0440\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0445 \u0441\u0438\u0433\u043d\u0430\u043b\u043e\u0432 (Early Media)'
clsFailed = u'\u041a \u0441\u043e\u0436\u0430\u043b\u0435\u043d\u0438\u044e, \u0437\u0432\u043e\u043d\u043e\u043a \u043d\u0435 \u0443\u0434\u0430\u043b\u0441\u044f'
clsFinished = u'\u041a\u043e\u043d\u0435\u0446'
clsInProgress = u'\u0418\u0434\u0435\u0442 \u0440\u0430\u0437\u0433\u043e\u0432\u043e\u0440'
clsLocalHold = u'\u041b\u043e\u043a\u0430\u043b\u044c\u043d\u043e\u0435 \u0443\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435 \u0437\u0432\u043e\u043d\u043a\u0430'
clsMissed = u'\u041f\u0440\u043e\u043f\u0443\u0449\u0435\u043d\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a'
clsOnHold = u'\u0412 \u043e\u0436\u0438\u0434\u0430\u043d\u0438\u0438'
clsRefused = u'\u041e\u0442\u043a\u0430\u0437'
clsRemoteHold = u'\u0423\u0434\u0430\u043b\u0435\u043d\u043d\u043e\u0435 \u0443\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435 \u0437\u0432\u043e\u043d\u043a\u0430'
clsRinging = u'\u0437\u0432\u043e\u043d\u0438\u0442'
clsRouting = u'\u041c\u0430\u0440\u0448\u0440\u0443\u0442\u0438\u0437\u0430\u0446\u0438\u044f'
clsTransferred = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
clsTransferring = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
clsUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
clsUnplaced = u'\u041d\u0435 \u0431\u044b\u043b \u043d\u0430\u0431\u0440\u0430\u043d'
clsVoicemailBufferingGreeting = u'\u0411\u0443\u0444\u0435\u0440\u0438\u0437\u0430\u0446\u0438\u044f \u043f\u0440\u0438\u0432\u0435\u0442\u0441\u0442\u0432\u0438\u044f'
clsVoicemailCancelled = u'\u0413\u043e\u043b\u043e\u0441\u043e\u0432\u043e\u0435 \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435 \u043e\u0442\u043c\u0435\u043d\u0435\u043d\u043e'
clsVoicemailFailed = u'\u0421\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435 \u043d\u0430 \u0430\u0432\u0442\u043e\u043e\u0442\u0432\u0435\u0442\u0447\u0438\u043a \u043d\u0435 \u0443\u0434\u0430\u043b\u043e\u0441\u044c'
clsVoicemailPlayingGreeting = u'\u041f\u0440\u043e\u0438\u0433\u0440\u044b\u0432\u0430\u043d\u0438\u0435 \u043f\u0440\u0438\u0432\u0435\u0442\u0441\u0442\u0432\u0438\u044f'
clsVoicemailRecording = u'\u0417\u0430\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u043c \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435'
clsVoicemailSent = u'\u0413\u043e\u043b\u043e\u0441\u043e\u0432\u043e\u0435 \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435 \u043e\u0442\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u043e'
clsVoicemailUploading = u'\u0417\u0430\u0433\u0440\u0443\u0437\u043a\u0430 \u0433\u043e\u043b\u043e\u0441\u043e\u0432\u043e\u0433\u043e \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f'
cltIncomingP2P = u'\u0412\u0445\u043e\u0434\u044f\u0449\u0438\u0439 \u043f\u0438\u0440\u0438\u043d\u0433\u043e\u0432\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a'
cltIncomingPSTN = u'\u0412\u0445\u043e\u0434\u044f\u0449\u0438\u0439 \u0442\u0435\u043b\u0435\u0444\u043e\u043d\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a'
cltOutgoingP2P = u'\u0418\u0441\u0445\u043e\u0434\u044f\u0449\u0438\u0439 \u043f\u0438\u0440\u0438\u043d\u0433\u043e\u0432\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a'
cltOutgoingPSTN = u'\u0418\u0441\u0445\u043e\u0434\u044f\u0449\u0438\u0439 \u0442\u0435\u043b\u0435\u0444\u043e\u043d\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a'
cltUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cmeAddedMembers = u'\u0414\u043e\u0431\u0430\u0432\u0438\u043b (-\u0430) \u043d\u043e\u0432\u044b\u0445 \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u043e\u0432'
cmeCreatedChatWith = u'\u041d\u0430\u0447\u0430\u0442 \u0447\u0430\u0442 \u0441'
cmeEmoted = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cmeLeft = u'\u0423\u0448\u0435\u043b'
cmeSaid = u'\u0421\u043a\u0430\u0437\u0430\u043b (-\u0430)'
cmeSawMembers = u'\u0412\u0438\u0434\u0435\u043b \u0443\u0447\u0430\u0441\u0442\u043d\u0438\u043a\u043e\u0432'
cmeSetTopic = u'\u041e\u043f\u0440\u0435\u0434\u0435\u043b\u0438\u043b \u0442\u0435\u043c\u0443'
cmeUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cmsRead = u'\u041f\u0440\u043e\u0447\u0442\u0435\u043d\u043e'
cmsReceived = u'\u041f\u043e\u043b\u0443\u0447\u0435\u043d\u043e'
cmsSending = u'\u041e\u0442\u043f\u0440\u0430\u0432\u043a\u0430...'
cmsSent = u'\u041e\u0442\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u043e'
cmsUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
conConnecting = u'\u0421\u043e\u0435\u0434\u0438\u043d\u044f\u0435\u043c'
conOffline = u'\u041d\u0435 \u0432 \u0441\u0435\u0442\u0438'
conOnline = u'\u0412 \u0441\u0435\u0442\u0438'
conPausing = u'\u041f\u0430\u0443\u0437\u0430'
conUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cusAway = u'\u041d\u0435\u0442 \u043d\u0430 \u043c\u0435\u0441\u0442\u0435'
cusDoNotDisturb = u'\u041d\u0435 \u0431\u0435\u0441\u043f\u043e\u043a\u043e\u0438\u0442\u044c'
cusInvisible = u'\u041d\u0435\u0432\u0438\u0434\u0438\u043c\u044b\u0439'
cusLoggedOut = u'\u041d\u0435 \u0432 \u0441\u0435\u0442\u0438'
cusNotAvailable = u'\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u0435\u043d'
cusOffline = u'\u041d\u0435 \u0432 \u0441\u0435\u0442\u0438'
cusOnline = u'\u0412 \u0441\u0435\u0442\u0438'
cusSkypeMe = u'\u0421\u0432\u043e\u0431\u043e\u0434\u0435\u043d \u0434\u043b\u044f \u0440\u0430\u0437\u0433\u043e\u0432\u043e\u0440\u0430'
cusUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
cvsBothEnabled = u'\u041f\u0440\u0438\u043d\u0438\u043c\u0430\u0442\u044c \u0438 \u043f\u0435\u0440\u0435\u0434\u0430\u0432\u0430\u0442\u044c \u0432\u0438\u0434\u0435\u043e\u0442\u0440\u0430\u043d\u0441\u043b\u044f\u0446\u0438\u044e'
cvsNone = u'\u041d\u0435\u0442 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438'
cvsReceiveEnabled = u'\u041f\u0440\u0438\u043d\u0438\u043c\u0430\u0442\u044c \u0432\u0438\u0434\u0435\u043e\u0442\u0440\u0430\u043d\u0441\u043b\u044f\u0446\u0438\u044e'
cvsSendEnabled = u'\u041f\u0435\u0440\u0435\u0434\u0430\u0432\u0430\u0442\u044c \u0432\u0438\u0434\u0435\u043e\u0442\u0440\u0430\u043d\u0441\u043b\u044f\u0446\u0438\u044e'
cvsUnknown = u''
grpAllFriends = u'\u0412\u0441\u0435 \u0434\u0440\u0443\u0437\u044c\u044f'
grpAllUsers = u'\u0412\u0441\u0435 \u0430\u0431\u043e\u043d\u0435\u043d\u0442\u044b'
grpCustomGroup = u'\u041e\u0441\u043e\u0431\u044b\u0435'
grpOnlineFriends = u'\u0414\u0440\u0443\u0437\u044c\u044f \u0432 \u0441\u0435\u0442\u0438'
grpPendingAuthorizationFriends = u'\u041e\u0436\u0438\u0434\u0430\u043d\u0438\u0435 \u0430\u0432\u0442\u043e\u0440\u0438\u0437\u0430\u0446\u0438\u0438'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'\u041d\u0435\u0434\u0430\u0432\u043d\u043e \u043e\u0431\u0449\u0430\u043b\u0438\u0441\u044c'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'\u0414\u0440\u0443\u0437\u044c\u044f \u043f\u043e Skype'
grpSkypeOutFriends = u'\u0414\u0440\u0443\u0437\u044c\u044f \u043f\u043e SkypeOut'
grpUngroupedFriends = u'\u041d\u0435\u0433\u0440\u0443\u043f\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0435 \u0434\u0440\u0443\u0437\u044c\u044f'
grpUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
grpUsersAuthorizedByMe = u'\u0410\u0432\u0442\u043e\u0440\u0438\u0437\u043e\u0432\u0430\u043d\u043d\u044b\u0435 \u043c\u043d\u043e\u0439'
grpUsersBlockedByMe = u'\u0411\u043b\u043e\u043a\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0435 \u043c\u043d\u043e\u0439'
grpUsersWaitingMyAuthorization = u'\u041e\u0436\u0438\u0434\u0430\u044e\u0442 \u043c\u043e\u0435\u0439 \u0430\u0432\u0442\u043e\u0440\u0438\u0437\u0430\u0446\u0438\u0438'
leaAddDeclined = u'\u041f\u0440\u0438\u0433\u043b\u0430\u0448\u0435\u043d\u0438\u0435 \u043e\u0442\u043a\u043b\u043e\u043d\u0435\u043d\u043e'
leaAddedNotAuthorized = u'\u041f\u0440\u0438\u0433\u043b\u0430\u0448\u0430\u0435\u043c\u044b\u0439 \u0434\u043e\u043b\u0436\u0435\u043d \u0438\u043c\u0435\u0442\u044c \u0440\u0430\u0437\u0440\u0435\u0448\u0435\u043d\u0438\u0435'
leaAdderNotFriend = u'\u041f\u0440\u0438\u0433\u043b\u0430\u0448\u0430\u044e\u0449\u0438\u0439 \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u0434\u0440\u0443\u0433\u043e\u043c'
leaUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
leaUnsubscribe = u'\u041d\u0435 \u044f\u0432\u043b\u044f\u0435\u0442\u0441\u044f \u0430\u0431\u043e\u043d\u0435\u043d\u0442\u043e\u043c'
leaUserIncapable = u'\u041d\u0435 \u043f\u043e\u0434\u0434\u0435\u0440\u0436\u0438\u0432\u0430\u0435\u0442\u0441\u044f \u0430\u0431\u043e\u043d\u0435\u043d\u0442\u043e\u043c'
leaUserNotFound = u'\u0410\u0431\u043e\u043d\u0435\u043d\u0442 \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d'
olsAway = u'\u041d\u0435\u0442 \u043d\u0430 \u043c\u0435\u0441\u0442\u0435'
olsDoNotDisturb = u'\u041d\u0435 \u0431\u0435\u0441\u043f\u043e\u043a\u043e\u0438\u0442\u044c'
olsNotAvailable = u'\u041d\u0435\u0434\u043e\u0441\u0442\u0443\u043f\u0435\u043d'
olsOffline = u'\u041d\u0435 \u0432 \u0441\u0435\u0442\u0438'
olsOnline = u'\u0412 \u0441\u0435\u0442\u0438'
olsSkypeMe = u'\u0421\u0432\u043e\u0431\u043e\u0434\u0435\u043d \u0434\u043b\u044f \u0440\u0430\u0437\u0433\u043e\u0432\u043e\u0440\u0430'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'\u0416\u0435\u043d\u0441\u043a\u0438\u0439'
usexMale = u'\u041c\u0443\u0436\u0441\u043a\u043e\u0439'
usexUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
vmrConnectError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0441\u043e\u0435\u0434\u0438\u043d\u0435\u043d\u0438\u044f'
vmrFileReadError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0447\u0442\u0435\u043d\u0438\u044f \u0444\u0430\u0439\u043b\u0430'
vmrFileWriteError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0437\u0430\u043f\u0438\u0441\u0438 \u0444\u0430\u0439\u043b\u0430'
vmrMiscError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0441\u043c\u0435\u0448\u0430\u043d\u043d\u043e\u0433\u043e \u0442\u0438\u043f\u0430'
vmrNoError = u'\u041d\u0435\u0442 \u043e\u0448\u0438\u0431\u043a\u0438'
vmrNoPrivilege = u'\u041d\u0435\u0442 \u043f\u0440\u0438\u0432\u0438\u043b\u0435\u0433\u0438\u0439 \u043d\u0430 \u0433\u043e\u043b\u043e\u0441\u043e\u0432\u0443\u044e \u043f\u043e\u0447\u0442\u0443'
vmrNoVoicemail = u'\u0422\u0430\u043a\u043e\u0433\u043e \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f \u043d\u0435\u0442'
vmrPlaybackError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0432\u043e\u0441\u043f\u0440\u043e\u0438\u0437\u0432\u0435\u0434\u0435\u043d\u0438\u044f'
vmrRecordingError = u'\u041e\u0448\u0438\u0431\u043a\u0430 \u0437\u0430\u043f\u0438\u0441\u0438'
vmrUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
vmsBlank = u'\u041f\u0443\u0441\u0442\u043e\u0435'
vmsBuffering = u'\u0411\u0443\u0444\u0435\u0440\u0438\u0437\u0438\u0440\u0443\u0435\u0442\u0441\u044f'
vmsDeleting = u'\u0423\u0434\u0430\u043b\u044f\u0435\u0442\u0441\u044f'
vmsDownloading = u'\u0417\u0430\u0433\u0440\u0443\u0436\u0430\u0435\u0442\u0441\u044f'
vmsFailed = u'\u041f\u0440\u043e\u0438\u0437\u043e\u0448\u0435\u043b \u0441\u0431\u043e\u0439'
vmsNotDownloaded = u'\u041d\u0435 \u0437\u0430\u0433\u0440\u0443\u0436\u0435\u043d\u043e'
vmsPlayed = u'\u041f\u0440\u043e\u0441\u043b\u0443\u0448\u0430\u043d\u043e'
vmsPlaying = u'\u041f\u0440\u043e\u0441\u043b\u0443\u0448\u0438\u0432\u0430\u0435\u0442\u0441\u044f'
vmsRecorded = u'\u0417\u0430\u043f\u0438\u0441\u0430\u043d\u043e'
vmsRecording = u'\u0417\u0430\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u043c \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435'
vmsUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
vmsUnplayed = u'\u041d\u0435 \u043f\u0440\u043e\u0441\u043b\u0443\u0448\u0430\u043d\u043e'
vmsUploaded = u'\u041e\u0442\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u043e'
vmsUploading = u'\u041e\u0442\u043f\u0440\u0430\u0432\u043b\u044f\u0435\u0442\u0441\u044f'
vmtCustomGreeting = u'\u0421\u043f\u0435\u0446\u0438\u0430\u043b\u044c\u043d\u043e\u0435 \u043f\u0440\u0438\u0432\u0435\u0442\u0441\u0442\u0432\u0438\u0435'
vmtDefaultGreeting = u'\u0421\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u043e\u0435 \u043f\u0440\u0438\u0432\u0435\u0442\u0441\u0442\u0432\u0438\u0435'
vmtIncoming = u'\u0432\u0445\u043e\u0434\u044f\u0449\u0435\u0435 \u0433\u043e\u043b\u043e\u0441\u043e\u0432\u043e\u0435 \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435'
vmtOutgoing = u'\u0418\u0441\u0445\u043e\u0434\u044f\u0449\u0435\u0435'
vmtUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
vssAvailable = u'\u0412\u043e\u0437\u043c\u043e\u0436\u0435\u043d'
vssNotAvailable = u'\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u0435\u043d'
vssPaused = u'\u041f\u0430\u0443\u0437\u0430'
vssRejected = u'\u041e\u0442\u043a\u043b\u043e\u043d\u0435\u043d'
vssRunning = u'\u0412 \u043f\u0440\u043e\u0446\u0435\u0441\u0441\u0435'
vssStarting = u'\u041d\u0430\u0447\u0438\u043d\u0430\u0435\u0442\u0441\u044f'
vssStopping = u'\u0417\u0430\u043a\u0430\u043d\u0447\u0438\u0432\u0430\u0435\u0442\u0441\u044f'
vssUnknown = u'\u041d\u0435\u0438\u0437\u0432\u0435\u0441\u0442\u043d\u043e'
| bsd-3-clause |
amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_stateful_firewall_tcp_syn_timeout.py | 2 | 1325 | from a10sdk.common.A10BaseClass import A10BaseClass
class SynTimeout(A10BaseClass):
""" :param syn_timeout_val: {"description": "Set Seconds session can remain in half-open state before being deleted (default: 4 seconds)", "format": "number", "default": 4, "optional": true, "maximum": 30, "minimum": 2, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Configure TCP SYN timeout.
Class syn-timeout supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/stateful-firewall/tcp/syn-timeout`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "syn-timeout"
self.a10_url="/axapi/v3/cgnv6/stateful-firewall/tcp/syn-timeout"
self.DeviceProxy = ""
self.syn_timeout_val = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 |
mikeywaites/flask-arrested | tests/test_endpoint.py | 1 | 10244 | import json
from mock import patch, MagicMock
from flask import Response
from werkzeug.exceptions import HTTPException
from arrested import (
ArrestedAPI, Resource,
Endpoint, ResponseHandler, GetListMixin,
CreateMixin, PutObjectMixin, PatchObjectMixin
)
from tests.utils import assertResponse
def test_get_endpoint_name():
"""test that user specified names are used by the get_name method
"""
class MyEndpoint(Endpoint):
name = 'test'
endpoint = MyEndpoint()
assert endpoint.get_name() == 'test'
def test_get_endpoint_name_defaults_to_class_name():
"""test the endpoint name will default to the classname if no custom name is provided
"""
class MyEndpoint(Endpoint):
name = None
endpoint = MyEndpoint()
assert endpoint.get_name() == 'myendpoint'
def test_make_response():
"""Test the default response returned by make_response method
"""
data = json.dumps({'foo': 'bar'})
resp = Endpoint().make_response(data)
exp_resp = Response(
response=data,
headers=None,
mimetype='application/json',
status=200
)
assertResponse(resp, exp_resp)
def test_make_response_set_mimetype():
"""test make response with a specific mimetype param passed to make_response method
"""
data = json.dumps({'foo': 'bar'})
resp = Endpoint().make_response(data, mime='text/html')
exp_resp = Response(
response=data,
headers=None,
mimetype='text/html',
status=200
)
assertResponse(resp, exp_resp)
def test_make_response_set_headers():
"""Test make_response with custom headers
"""
data = json.dumps({'foo': 'bar'})
resp = Endpoint().make_response(data, headers={'Cache-Control': 'must-revalidate'})
exp_resp = Response(
response=data,
headers={'Cache-Control': 'must-revalidate'},
mimetype='application/json',
status=200
)
assertResponse(resp, exp_resp)
def test_make_response_set_status():
"""Test make_response with no default status code
"""
data = json.dumps({'foo': 'bar'})
resp = Endpoint().make_response(data, status=201)
exp_resp = Response(
response=data,
headers=None,
mimetype='application/json',
status=201
)
assertResponse(resp, exp_resp)
def test_get_response_handler_with_custom_handler_params():
"""test get_response_handler method is passed custom params when
get_response_handler_params is overridden
"""
class FakeMapper(object):
pass
class MyEndpoint(Endpoint):
def get_response_handler_params(self, **params):
params = super(MyEndpoint, self).get_response_handler_params(**params)
params['mapper_class'] = FakeMapper
return params
handler = MyEndpoint().get_response_handler()
assert isinstance(handler, ResponseHandler)
assert handler.params == {'mapper_class': FakeMapper}
def test_get_response_handler_custom_handler():
class MyResponseHandler(ResponseHandler):
pass
class MyEndpoint(Endpoint):
response_handler = MyResponseHandler
handler = MyEndpoint().get_response_handler()
assert isinstance(handler, MyResponseHandler)
def test_get_request_handler():
pass
def test_get_request_handler_custom_handler():
pass
def test_return_error_response():
endpoint = Endpoint()
try:
endpoint.return_error(400, payload={'error': True})
except HTTPException as resp:
assert resp.code == 400
assert resp.response.data == b'{"error": true}'
def test_endpoint_dispatch_request_method_not_allowed_returns_error(app):
class MyEndpoint(Endpoint, GetListMixin):
methods = ["GET"]
def handle_get_request(self):
pass
with patch.object(MyEndpoint, 'return_error') as mock_return_error:
with app.test_request_context('/test', method='POST'):
MyEndpoint().dispatch_request()
mock_return_error.assert_called_once_with(405)
def test_get_calls_handle_get_request():
class MyEndpoint(Endpoint):
def handle_get_request(self):
pass
data = {'foo': 'bar'}
with patch.object(MyEndpoint, 'handle_get_request', return_value=data) as mocked:
MyEndpoint().get()
mocked.assert_called_once()
def test_post_calls_handle_post_request():
class MyEndpoint(Endpoint):
def handle_post_request(self):
pass
data = {'foo': 'bar'}
with patch.object(MyEndpoint, 'handle_post_request', return_value=data) as _mock:
MyEndpoint().post()
_mock.assert_called_once()
def test_put_calls_handle_put_request():
class MyEndpoint(Endpoint):
def handle_put_request(self):
pass
data = {'foo': 'bar'}
with patch.object(MyEndpoint, 'handle_put_request', return_value=data) as _mock:
MyEndpoint().put()
_mock.assert_called_once()
def test_patch_calls_handle_patch_request():
class MyEndpoint(Endpoint):
def handle_patch_request(self):
pass
data = {'foo': 'bar'}
with patch.object(MyEndpoint, 'handle_patch_request', return_value=data) as _mock:
MyEndpoint().patch()
_mock.assert_called_once()
def test_endpoint_dispatch_request_method_calls_process_before_request_hooks(app):
class MyEndpoint(Endpoint, GetListMixin):
methods = ["GET"]
def handle_get_request(self):
pass
with patch.object(Endpoint, 'process_before_request_hooks') as mock_before_hooks:
MyEndpoint().dispatch_request()
mock_before_hooks.assert_called_once_with()
def test_endpoint_dispatch_request_method_calls_process_after_request_hooks(app):
mock_response = MagicMock(spec=Response)
class MyEndpoint(Endpoint, GetListMixin):
methods = ["GET"]
def handle_get_request(self):
return mock_response
with patch.object(Endpoint, 'process_after_request_hooks') as mock_after_hooks:
MyEndpoint().dispatch_request()
mock_after_hooks.assert_called_once_with(mock_response)
def test_process_before_request_hooks_processed_in_order(app):
call_sequence = []
def test_hook(type_, sequence):
def hook(endpoint):
sequence.append(type_)
return hook
api_hook = test_hook('api', call_sequence)
resource_hook = test_hook('resource', call_sequence)
endpoint_hook = test_hook('endpoint', call_sequence)
endpoint_get_hook = test_hook('endpoint_get', call_sequence)
my_api = ArrestedAPI(before_all_hooks=[api_hook], url_prefix='/')
my_api.init_app(app)
my_resource = Resource('test', __name__, before_all_hooks=[resource_hook])
class MyEndpoint(Endpoint):
before_all_hooks = [endpoint_hook]
before_get_hooks = [endpoint_get_hook]
url = ''
my_resource.add_endpoint(Endpoint)
my_api.register_resource(my_resource)
endpoint = MyEndpoint()
endpoint.resource = my_resource
endpoint.meth = 'get'
endpoint.process_before_request_hooks()
assert call_sequence == ['api', 'resource', 'endpoint', 'endpoint_get']
def test_process_before_hooks_no_resource(app):
log_request = MagicMock(return_value=None)
class MyEndpoint(Endpoint, GetListMixin):
before_get_hooks = [log_request, ]
def get_objects(self):
return [{'foo': 'bar'}]
endpoint = MyEndpoint()
endpoint.resource = None
endpoint.meth = 'get'
endpoint.process_before_request_hooks()
log_request.assert_called_once()
def test_process_before_hooks_request_meth_not_handled(app):
log_request = MagicMock(return_value=None)
class MyEndpoint(Endpoint, GetListMixin):
before_get_hooks = [log_request, ]
def get_objects(self):
return [{'foo': 'bar'}]
endpoint = MyEndpoint()
endpoint.resource = None
endpoint.meth = 'options'
endpoint.process_before_request_hooks()
assert not log_request.called
def test_process_after_request_hooks_processed_in_order(app):
call_sequence = []
def test_hook(type_, sequence):
def hook(endpoint, resp):
sequence.append(type_)
return hook
api_hook = test_hook('api', call_sequence)
resource_hook = test_hook('resource', call_sequence)
endpoint_hook = test_hook('endpoint', call_sequence)
endpoint_get_hook = test_hook('endpoint_get', call_sequence)
my_api = ArrestedAPI(after_all_hooks=[api_hook], url_prefix='/')
my_api.init_app(app)
my_resource = Resource('test', __name__, after_all_hooks=[resource_hook])
class MyEndpoint(Endpoint):
after_all_hooks = [endpoint_hook]
after_get_hooks = [endpoint_get_hook]
url = ''
my_resource.add_endpoint(Endpoint)
my_api.register_resource(my_resource)
endpoint = MyEndpoint()
endpoint.resource = my_resource
endpoint.meth = 'get'
resp = MagicMock(spec=Response())
endpoint.process_after_request_hooks(resp)
assert call_sequence == ['endpoint_get', 'endpoint', 'resource', 'api']
def test_process_after_hooks_no_resource(app):
log_request = MagicMock(return_value=None)
class MyEndpoint(Endpoint, GetListMixin):
after_get_hooks = [log_request, ]
def get_objects(self):
return [{'foo': 'bar'}]
endpoint = MyEndpoint()
endpoint.resource = None
endpoint.meth = 'get'
resp = MagicMock(spec=Response())
endpoint.process_after_request_hooks(resp)
log_request.assert_called_once()
def test_process_after_hooks_request_meth_not_handled(app):
log_request = MagicMock(return_value=None)
class MyEndpoint(Endpoint, GetListMixin):
after_get_hooks = [log_request, ]
def get_objects(self):
return [{'foo': 'bar'}]
endpoint = MyEndpoint()
endpoint.resource = None
endpoint.meth = 'options'
resp = MagicMock(spec=Response())
endpoint.process_after_request_hooks(resp)
assert not log_request.called
def test_delete_calls_handle_delete_request():
pass
def test_restrict_methods():
pass
def test_endpoint_url():
pass
| mit |
linux-on-ibm-z/kubernetes | translations/extract.py | 137 | 3951 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
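# Editor's illustration (the input line is an assumed example, not taken from the
# kubectl sources): given a line such as
#     Short:   "Apply a configuration to a resource by filename or stdin",
# SHORT_MATCH/short_replace rewrite it as
#     Short:   i18n.T("Apply a configuration to a resource by filename or stdin"),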
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
gofmt round wil fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
"""Replace a cmd.Flags().String("...", "", "...") with an internationalization
"""
sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
LONG_DESC_MATCH = MatchHandler('(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler('(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
"""Given a file and a set of matchers, run those matchers
across the file and replace it with the results.
"""
# Run all the matchers
line_number = 0
for line in fileinput.input(filename, inplace=True):
line_number += 1
matched = False
for matcher in matchers:
match = matcher.regex.match(line)
if match:
matcher.replace_fn(match, filename, line_number)
matched = True
break
if not matched:
sys.stdout.write(line)
sys.stdout.flush()
with open(filename, 'r') as datafile:
content = datafile.read()
for matcher in multiline_matchers:
match = matcher.regex.search(content)
while match:
rep = matcher.replace_fn(match, filename, 0)
# Escape back references in the replacement string
# (And escape for Python)
# (And escape for regex)
rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
content = matcher.regex.sub(rep, content, 1)
match = matcher.regex.search(content)
sys.stdout.write(content)
# gofmt the file again
from subprocess import call
call(["goimports", "-w", filename])
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
| apache-2.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/compiler/jit.py | 129 | 4433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
_XLA_SCOPE_KEY = ("__xla_scope",)
class _XlaScope(object):
"""Keeps track of previous XLA scope calls, and depth of current call."""
def __init__(self, count, depth):
self.count = count
self.depth = depth
@contextlib.contextmanager
def experimental_jit_scope(compile_ops=True, separate_compiled_gradients=False):
"""Enable or disable JIT compilation of operators within the scope.
NOTE: This is an experimental feature.
The compilation is a hint and only supported on a best-effort basis.
Example usage:
with tf.contrib.compiler.experimental_jit_scope():
c = tf.matmul(a, b) # compiled
with tf.contrib.compiler.experimental_jit_scope(compile_ops=False):
d = tf.matmul(a, c) # not compiled
with tf.contrib.compiler.experimental_jit_scope(
compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
e = tf.matmul(a, b) + d # matmul is compiled, the addition is not.
Example of separate_compiled_gradients:
# In the example below, the computations for f, g and h will all be compiled
# in separate scopes.
with tf.contrib.compiler.experimental_jit_scope(
separate_compiled_gradients=True):
f = tf.matmul(a, b)
g = tf.gradients([f], [a, b], name='mygrads1')
h = tf.gradients([f], [a, b], name='mygrads2')
Args:
compile_ops: Whether to enable or disable compilation in the scope.
Either a Python bool, or a callable that accepts the parameter
`node_def` and returns a python bool.
separate_compiled_gradients: If true put each gradient subgraph into a
separate compilation scope. This gives fine-grained control over which
portions of the graph will be compiled as a single unit. Compiling
gradients separately may yield better performance for some graphs.
The scope is named based on the scope of the forward computation as well
as the name of the gradients. As a result, the gradients will be compiled
in a scope that is separate from both the forward computation, and from
other gradients.
Yields:
The current scope, enabling or disabling compilation.
"""
if callable(compile_ops):
def xla_compile(node_def):
return attr_value_pb2.AttrValue(b=compile_ops(node_def))
else:
xla_compile = attr_value_pb2.AttrValue(b=compile_ops)
attrs = {
"_XlaCompile":
xla_compile,
"_XlaSeparateCompiledGradients":
attr_value_pb2.AttrValue(b=bool(separate_compiled_gradients))
}
# Find the singleton counter for the current scoped graph. If it
# doesn't exist, create one.
xla_scope_counter = ops.get_collection(_XLA_SCOPE_KEY)
if not xla_scope_counter:
xla_scope_counter = _XlaScope(0, 0)
ops.add_to_collection(_XLA_SCOPE_KEY, xla_scope_counter)
else:
xla_scope_counter = xla_scope_counter[0]
if xla_scope_counter.depth == 0:
# If we're at the root xla scope, we can increase the counter so
# future calls to jit_scope use a different scope value.
# If we're already within a scope, we'll be fusing using the scope
# controlled by the parent.
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("jit_scope_%d" % xla_scope_counter.count).encode())
xla_scope_counter.count += 1
xla_scope_counter.depth += 1
# pylint: disable=protected-access
with ops.get_default_graph()._attr_scope(attrs):
yield
# pylint: enable=protected-access
xla_scope_counter.depth -= 1
| apache-2.0 |
jiangzhixiao/odoo | addons/account/report/account_financial_report.py | 380 | 6365 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from common_report_header import common_report_header
from openerp.tools.translate import _
from openerp.osv import osv
class report_account_common(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(report_account_common, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'get_lines': self.get_lines,
'time': time,
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_filter': self._get_filter,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'get_target_move': self._get_target_move,
})
self.context = context
def set_context(self, objects, data, ids, report_type=None):
new_ids = ids
if (data['model'] == 'ir.ui.menu'):
new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
return super(report_account_common, self).set_context(objects, data, new_ids, report_type=report_type)
def get_lines(self, data):
lines = []
account_obj = self.pool.get('account.account')
currency_obj = self.pool.get('res.currency')
ids2 = self.pool.get('account.financial.report')._get_children_by_order(self.cr, self.uid, [data['form']['account_report_id'][0]], context=data['form']['used_context'])
for report in self.pool.get('account.financial.report').browse(self.cr, self.uid, ids2, context=data['form']['used_context']):
vals = {
'name': report.name,
'balance': report.balance * report.sign or 0.0,
'type': 'report',
'level': bool(report.style_overwrite) and report.style_overwrite or report.level,
'account_type': report.type =='sum' and 'view' or False, #used to underline the financial report balances
}
if data['form']['debit_credit']:
vals['debit'] = report.debit
vals['credit'] = report.credit
if data['form']['enable_filter']:
vals['balance_cmp'] = self.pool.get('account.financial.report').browse(self.cr, self.uid, report.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
lines.append(vals)
account_ids = []
if report.display_detail == 'no_detail':
#the rest of the loop is used to display the details of the financial report, so it's not needed here.
continue
if report.type == 'accounts' and report.account_ids:
account_ids = account_obj._get_children_and_consol(self.cr, self.uid, [x.id for x in report.account_ids])
elif report.type == 'account_type' and report.account_type_ids:
account_ids = account_obj.search(self.cr, self.uid, [('user_type','in', [x.id for x in report.account_type_ids])])
if account_ids:
for account in account_obj.browse(self.cr, self.uid, account_ids, context=data['form']['used_context']):
#if there are accounts to display, we add them to the lines with a level equal to their level in
#the COA + 1 (to avoid giving them a level so low that it would conflict with the level of the
#financial reports for Assets, liabilities...)
if report.display_detail == 'detail_flat' and account.type == 'view':
continue
flag = False
vals = {
'name': account.code + ' ' + account.name,
'balance': account.balance != 0 and account.balance * report.sign or account.balance,
'type': 'account',
'level': report.display_detail == 'detail_with_hierarchy' and min(account.level + 1,6) or 6, #account.level + 1
'account_type': account.type,
}
if data['form']['debit_credit']:
vals['debit'] = account.debit
vals['credit'] = account.credit
if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance']):
flag = True
if data['form']['enable_filter']:
vals['balance_cmp'] = account_obj.browse(self.cr, self.uid, account.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance_cmp']):
flag = True
if flag:
lines.append(vals)
return lines
class report_financial(osv.AbstractModel):
_name = 'report.account.report_financial'
_inherit = 'report.abstract_report'
_template = 'account.report_financial'
_wrapped_report_class = report_account_common
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lezzago/LambdaMart | lambdamart.py | 1 | 10588 | import numpy as np
import math
import random
import copy
from sklearn.tree import DecisionTreeRegressor
from multiprocessing import Pool
from RegressionTree import RegressionTree
import pandas as pd
import pickle
def dcg(scores):
"""
Returns the DCG value of the list of scores.
Parameters
----------
scores : list
Contains labels in a certain ranked order
Returns
-------
DCG_val: int
This is the value of the DCG on the given scores
"""
return np.sum([
(np.power(2, scores[i]) - 1) / np.log2(i + 2)
for i in xrange(len(scores))
])
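# Editor's worked example (not in the original source): for scores = [3, 1, 2],
#   dcg = (2**3 - 1)/log2(1 + 1) + (2**1 - 1)/log2(2 + 1) + (2**2 - 1)/log2(3 + 1)
#       = 7/1.000 + 1/1.585 + 3/2.000
#       ~ 9.13
# Re-ranking the same labels in descending order, [3, 2, 1], yields ~ 9.39,
# which is the ideal DCG used for normalisation below.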
def dcg_k(scores, k):
"""
Returns the DCG value of the list of scores and truncates to k values.
Parameters
----------
scores : list
Contains labels in a certain ranked order
k : int
The number of top-ranked values to consider when computing DCG
Returns
-------
DCG_val: int
This is the value of the DCG on the given scores
"""
return np.sum([
(np.power(2, scores[i]) - 1) / np.log2(i + 2)
for i in xrange(len(scores[:k]))
])
def ideal_dcg(scores):
"""
Returns the Ideal DCG value of the list of scores.
Parameters
----------
scores : list
Contains labels in a certain ranked order
Returns
-------
Ideal_DCG_val: int
This is the value of the Ideal DCG on the given scores
"""
scores = [score for score in sorted(scores)[::-1]]
return dcg(scores)
def ideal_dcg_k(scores, k):
"""
Returns the Ideal DCG value of the list of scores and truncates to k values.
Parameters
----------
scores : list
Contains labels in a certain ranked order
k : int
The number of top-ranked values to consider when computing DCG
Returns
-------
Ideal_DCG_val: int
This is the value of the Ideal DCG on the given scores
"""
scores = [score for score in sorted(scores)[::-1]]
return dcg_k(scores, k)
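# Sketch of how these helpers combine into NDCG@k (hedged; the real computation
# lives in LambdaMART.validate() further below): the DCG of the ranked labels,
# truncated at k, is normalised by the ideal DCG@k, so a perfectly ordered list
# scores 1.0. When every label is 0 the ideal DCG is 0 and the ratio is NaN,
# which is why validate() averages with np.nanmean.
def _ndcg_at_k_sketch(ranked_labels, k):
    return dcg_k(ranked_labels, k) / ideal_dcg_k(ranked_labels, k)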
def single_dcg(scores, i, j):
"""
Returns the DCG value at a single point.
Parameters
----------
scores : list
Contains labels in a certain ranked order
i : int
This points to the ith value in scores
j : int
This sets the ith value in scores to be the jth rank
Returns
-------
Single_DCG: int
This is the value of the DCG at a single point
"""
return (np.power(2, scores[i]) - 1) / np.log2(j + 2)
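# Why single_dcg() exists (explanatory sketch): when documents i and j swap
# positions in the current ranking only their two DCG terms change, so the
# NDCG change of a swap can be assembled from four cached single-term values
# instead of recomputing the whole DCG. compute_lambda() below caches them as
#     |delta NDCG(i, j)| = |s(i,j) - s(i,i) + s(j,i) - s(j,j)| / IDCG
# where s(a, b) = single_dcg(true_scores, a, b).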
def compute_lambda(args):
"""
Returns the lambda and w values for a given query.
Parameters
----------
args : zipped value of true_scores, predicted_scores, good_ij_pairs, idcg, query_key
Contains a list of the true labels of documents, list of the predicted labels of documents,
i and j pairs where true_score[i] > true_score[j], idcg values, and query keys.
Returns
-------
lambdas : numpy array
This contains the calculated lambda values
w : numpy array
This contains the computed w values
query_key : int
This is the query id these values refer to
"""
true_scores, predicted_scores, good_ij_pairs, idcg, query_key = args
num_docs = len(true_scores)
sorted_indexes = np.argsort(predicted_scores)[::-1]
rev_indexes = np.argsort(sorted_indexes)
true_scores = true_scores[sorted_indexes]
predicted_scores = predicted_scores[sorted_indexes]
lambdas = np.zeros(num_docs)
w = np.zeros(num_docs)
single_dcgs = {}
for i,j in good_ij_pairs:
if (i,i) not in single_dcgs:
single_dcgs[(i,i)] = single_dcg(true_scores, i, i)
single_dcgs[(i,j)] = single_dcg(true_scores, i, j)
if (j,j) not in single_dcgs:
single_dcgs[(j,j)] = single_dcg(true_scores, j, j)
single_dcgs[(j,i)] = single_dcg(true_scores, j, i)
for i,j in good_ij_pairs:
z_ndcg = abs(single_dcgs[(i,j)] - single_dcgs[(i,i)] + single_dcgs[(j,i)] - single_dcgs[(j,j)]) / idcg
rho = 1 / (1 + np.exp(predicted_scores[i] - predicted_scores[j]))
rho_complement = 1.0 - rho
lambda_val = z_ndcg * rho
lambdas[i] += lambda_val
lambdas[j] -= lambda_val
w_val = rho * rho_complement * z_ndcg
w[i] += w_val
w[j] += w_val
return lambdas[rev_indexes], w[rev_indexes], query_key
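# Summary of the quantities computed above, in the usual LambdaMART notation
# (this follows the code line by line): for each pair (i, j) with
# true_score[i] > true_score[j] and current predicted scores s,
#     rho       = 1 / (1 + exp(s_i - s_j))
#     lambda_ij = |delta NDCG(i, j)| * rho
#     w_ij      = |delta NDCG(i, j)| * rho * (1 - rho)
# lambdas[i] accumulates +lambda_ij and lambdas[j] accumulates -lambda_ij
# (a pseudo-gradient pushing i up and j down), while w accumulates the
# second-order weights; fit() then trains a regression tree on the lambdas.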
def group_queries(training_data, qid_index):
"""
Returns a dictionary that groups the documents by their query ids.
Parameters
----------
training_data : Numpy array of lists
Contains a list of document information. Each document's format is [relevance score, query index, feature vector]
qid_index : int
This is the index where the qid is located in the training data
Returns
-------
query_indexes : dictionary
			The keys are the different query ids and the values are the indexes in the training data that are associated with those keys.
"""
query_indexes = {}
index = 0
for record in training_data:
query_indexes.setdefault(record[qid_index], [])
query_indexes[record[qid_index]].append(index)
index += 1
return query_indexes
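# A small sketch of group_queries() on hand-made rows (toy values, assumed
# layout [relevance, qid, feature...] as used by fit() below):
def _group_queries_sketch():
    toy_rows = np.array([[2.0, 1.0, 0.5],
                         [0.0, 1.0, 0.1],
                         [1.0, 2.0, 0.9]])
    groups = group_queries(toy_rows, 1)
    assert groups[1.0] == [0, 1]   # rows 0 and 1 belong to query 1
    assert groups[2.0] == [2]      # row 2 belongs to query 2
    return groups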
def get_pairs(scores):
"""
Returns pairs of indexes where the first value in the pair has a higher score than the second value in the pair.
Parameters
----------
		scores : list of lists of int
			Contains one list of true scores per query
Returns
-------
query_pair : list of pairs
This contains a list of pairs of indexes in scores.
"""
query_pair = []
for query_scores in scores:
temp = sorted(query_scores, reverse=True)
pairs = []
for i in xrange(len(temp)):
for j in xrange(len(temp)):
if temp[i] > temp[j]:
pairs.append((i,j))
query_pair.append(pairs)
return query_pair
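# Sketch of get_pairs() on a single toy query (assumed labels): the returned
# (i, j) pairs index into the descending-sorted label list, with the label at
# position i strictly greater than the label at position j.
def _get_pairs_sketch():
    pairs_per_query = get_pairs([[1, 0, 2]])   # sorted labels: [2, 1, 0]
    assert pairs_per_query[0] == [(0, 1), (0, 2), (1, 2)]
    return pairs_per_query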
class LambdaMART:
def __init__(self, training_data=None, number_of_trees=5, learning_rate=0.1, tree_type='sklearn'):
"""
This is the constructor for the LambdaMART object.
Parameters
----------
		training_data : Numpy array of documents
			Each document's format is [relevance score, query index, feature vector]
number_of_trees : int (default: 5)
Number of trees LambdaMART goes through
learning_rate : float (default: 0.1)
Rate at which we update our prediction with each tree
tree_type : string (default: "sklearn")
Either "sklearn" for using Sklearn implementation of the tree of "original"
for using our implementation
"""
if tree_type != 'sklearn' and tree_type != 'original':
raise ValueError('The "tree_type" must be "sklearn" or "original"')
self.training_data = training_data
self.number_of_trees = number_of_trees
self.learning_rate = learning_rate
self.trees = []
self.tree_type = tree_type
def fit(self):
"""
Fits the model on the training data.
"""
predicted_scores = np.zeros(len(self.training_data))
query_indexes = group_queries(self.training_data, 1)
query_keys = query_indexes.keys()
true_scores = [self.training_data[query_indexes[query], 0] for query in query_keys]
good_ij_pairs = get_pairs(true_scores)
tree_data = pd.DataFrame(self.training_data[:, 2:7])
labels = self.training_data[:, 0]
# ideal dcg calculation
idcg = [ideal_dcg(scores) for scores in true_scores]
for k in xrange(self.number_of_trees):
print 'Tree %d' % (k)
lambdas = np.zeros(len(predicted_scores))
w = np.zeros(len(predicted_scores))
pred_scores = [predicted_scores[query_indexes[query]] for query in query_keys]
pool = Pool()
for lambda_val, w_val, query_key in pool.map(compute_lambda, zip(true_scores, pred_scores, good_ij_pairs, idcg, query_keys), chunksize=1):
indexes = query_indexes[query_key]
lambdas[indexes] = lambda_val
w[indexes] = w_val
pool.close()
if self.tree_type == 'sklearn':
# Sklearn implementation of the tree
tree = DecisionTreeRegressor(max_depth=50)
tree.fit(self.training_data[:,2:], lambdas)
self.trees.append(tree)
prediction = tree.predict(self.training_data[:,2:])
predicted_scores += prediction * self.learning_rate
elif self.tree_type == 'original':
# Our implementation of the tree
tree = RegressionTree(tree_data, lambdas, max_depth=10, ideal_ls= 0.001)
tree.fit()
prediction = tree.predict(self.training_data[:,2:])
predicted_scores += prediction * self.learning_rate
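                # Note: unlike the 'sklearn' branch, this branch does not append the fitted
                # RegressionTree to self.trees, so predict() and validate() below only replay
                # trees built with tree_type='sklearn'.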
def predict(self, data):
"""
Predicts the scores for the test dataset.
Parameters
----------
data : Numpy array of documents
			Numpy array of documents where each document's format is [query index, feature vector]
Returns
-------
predicted_scores : Numpy array of scores
			This contains an array of the predicted scores for the documents.
"""
data = np.array(data)
query_indexes = group_queries(data, 0)
predicted_scores = np.zeros(len(data))
for query in query_indexes:
results = np.zeros(len(query_indexes[query]))
for tree in self.trees:
results += self.learning_rate * tree.predict(data[query_indexes[query], 1:])
predicted_scores[query_indexes[query]] = results
return predicted_scores
def validate(self, data, k):
"""
Predicts the scores for the test dataset and calculates the NDCG value.
Parameters
----------
data : Numpy array of documents
			Numpy array of documents where each document's format is [relevance score, query index, feature vector]
k : int
this is used to compute the NDCG@k
Returns
-------
average_ndcg : float
This is the average NDCG value of all the queries
predicted_scores : Numpy array of scores
			This contains an array of the predicted scores for the documents.
"""
data = np.array(data)
query_indexes = group_queries(data, 1)
average_ndcg = []
predicted_scores = np.zeros(len(data))
for query in query_indexes:
results = np.zeros(len(query_indexes[query]))
for tree in self.trees:
results += self.learning_rate * tree.predict(data[query_indexes[query], 2:])
predicted_sorted_indexes = np.argsort(results)[::-1]
t_results = data[query_indexes[query], 0]
t_results = t_results[predicted_sorted_indexes]
predicted_scores[query_indexes[query]] = results
dcg_val = dcg_k(t_results, k)
idcg_val = ideal_dcg_k(t_results, k)
ndcg_val = (dcg_val / idcg_val)
average_ndcg.append(ndcg_val)
average_ndcg = np.nanmean(average_ndcg)
return average_ndcg, predicted_scores
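    # End-to-end usage sketch (hedged: assumes the data has already been parsed
    # into a numpy array whose rows are [relevance score, qid, feature...],
    # which is the layout fit() and validate() expect):
    #
    #     model = LambdaMART(training_data, number_of_trees=10, learning_rate=0.1)
    #     model.fit()
    #     avg_ndcg, predicted = model.validate(validation_data, k=10)
    #     model.save('lambdamart_model')   # written to lambdamart_model.lmart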
def save(self, fname):
"""
Saves the model into a ".lmart" file with the name given as a parameter.
Parameters
----------
fname : string
Filename of the file you want to save
"""
pickle.dump(self, open('%s.lmart' % (fname), "wb"), protocol=2)
def load(self, fname):
"""
Loads the model from the ".lmart" file given as a parameter.
Parameters
----------
fname : string
Filename of the file you want to load
"""
model = pickle.load(open(fname , "rb"))
self.training_data = model.training_data
self.number_of_trees = model.number_of_trees
self.tree_type = model.tree_type
self.learning_rate = model.learning_rate
self.trees = model.trees | mit |
Lkhagvadelger/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py | 119 | 71750 | # Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2011 Daniel Bates ([email protected]). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import base64
import codecs
import getpass
import os
import os.path
import re
import stat
import sys
import subprocess
import tempfile
import time
import unittest2 as unittest
import urllib
import shutil
from datetime import date
from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.config.committers import Committer # FIXME: This should not be needed
from webkitpy.common.net.bugzilla import Attachment # FIXME: This should not be needed
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
from .git import Git, AmbiguousCommitError
from .detection import detect_scm_system
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
cached_svn_repo_path = None
def remove_dir(path):
# Change directory to / to ensure that we aren't in the directory we want to delete.
os.chdir('/')
shutil.rmtree(path)
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
@atexit.register
def delete_cached_mock_repo_at_exit():
if cached_svn_repo_path:
remove_dir(cached_svn_repo_path)
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
def run_command(*args, **kwargs):
# FIXME: This should not be a global static.
# New code should use Executive.run_command directly instead
return Executive().run_command(*args, **kwargs)
# FIXME: This should be unified into one of the executive.py commands!
# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True)
def run_silent(args, cwd=None):
# Note: Not thread safe: http://bugs.python.org/issue2320
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process.communicate() # ignore output
exit_code = process.wait()
if exit_code:
raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
def write_into_file_at_path(file_path, contents, encoding="utf-8"):
if encoding:
with codecs.open(file_path, "w", encoding) as file:
file.write(contents)
else:
with open(file_path, "w") as file:
file.write(contents)
def read_from_path(file_path, encoding="utf-8"):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
def _make_diff(command, *args):
# We use this wrapper to disable output decoding. diffs should be treated as
    # binary files since they may include text files of multiple different encodings.
# FIXME: This should use an Executive.
return run_command([command, "diff"] + list(args), decode_output=False)
def _svn_diff(*args):
return _make_diff("svn", *args)
def _git_diff(*args):
return _make_diff("git", *args)
# Exists to share svn repository creation code between the git and svn tests
class SVNTestRepository(object):
@classmethod
def _svn_add(cls, path):
run_command(["svn", "add", path])
@classmethod
def _svn_commit(cls, message):
run_command(["svn", "commit", "--quiet", "--message", message])
@classmethod
def _setup_test_commits(cls, svn_repo_url):
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
# Add some test commits
os.chdir(svn_checkout_path)
write_into_file_at_path("test_file", "test1")
cls._svn_add("test_file")
cls._svn_commit("initial commit")
write_into_file_at_path("test_file", "test1test2")
# This used to be the last commit, but doing so broke
        # GitTest.test_apply_git_patch which uses the inverse diff of the last commit.
# svn-apply fails to remove directories in Git, see:
# https://bugs.webkit.org/show_bug.cgi?id=34871
os.mkdir("test_dir")
# Slash should always be the right path separator since we use cygwin on Windows.
test_file3_path = "test_dir/test_file3"
write_into_file_at_path(test_file3_path, "third file")
cls._svn_add("test_dir")
cls._svn_commit("second commit")
write_into_file_at_path("test_file", "test1test2test3\n")
write_into_file_at_path("test_file2", "second file")
cls._svn_add("test_file2")
cls._svn_commit("third commit")
# This 4th commit is used to make sure that our patch file handling
# code correctly treats patches as binary and does not attempt to
# decode them assuming they're utf-8.
write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
cls._svn_commit("fourth commit")
# svn does not seem to update after commit as I would expect.
run_command(['svn', 'update'])
remove_dir(svn_checkout_path)
# This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
# GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
# it since it's expensive to create the mock repo.
@classmethod
def setup(cls, test_object):
global cached_svn_repo_path
if not cached_svn_repo_path:
cached_svn_repo_path = cls._setup_mock_repo()
test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])
@classmethod
def _setup_mock_repo(cls):
        # Create a test SVN repository
svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
svn_repo_url = "file://%s" % svn_repo_path # Not sure this will work on windows
# git svn complains if we don't pass --pre-1.5-compatible, not sure why:
# Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
# Create a test svn checkout
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
        # Create and check out a trunk dir to match the standard svn configuration and git-svn's expectations
os.chdir(svn_checkout_path)
os.mkdir('trunk')
cls._svn_add('trunk')
# We can add tags and branches as well if we ever need to test those.
cls._svn_commit('add trunk')
# Change directory out of the svn checkout so we can delete the checkout directory.
remove_dir(svn_checkout_path)
cls._setup_test_commits(svn_repo_url + "/trunk")
return svn_repo_path
@classmethod
def tear_down(cls, test_object):
remove_dir(test_object.temp_directory)
        # Now that we've deleted the checkout paths, the cwd may be invalid
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
if os.path.isabs(__file__):
path = os.path.dirname(__file__)
else:
path = sys.path[0]
os.chdir(detect_scm_system(path).checkout_root)
# For testing the SCM baseclass directly.
class SCMClassTests(unittest.TestCase):
def setUp(self):
self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet.
def tearDown(self):
self.dev_null.close()
def test_run_command_with_pipe(self):
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
# Test the non-pipe case too:
self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
command_returns_non_zero = ['/bin/sh', '--invalid-option']
# Test when the input pipe process fails.
input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertNotEqual(input_process.poll(), 0)
self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
# Test when the run_command process fails.
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments.
self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
def test_error_handlers(self):
git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
svn_failure_message="""svn: Commit failed (details follow):
svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
command_does_not_exist = ['does_not_exist', 'invalid_option']
self.assertRaises(OSError, run_command, command_does_not_exist)
self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
command_returns_non_zero = ['/bin/sh', '--invalid-option']
self.assertRaises(ScriptError, run_command, command_returns_non_zero)
# Check if returns error text:
self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
class SCMTest(unittest.TestCase):
def _create_patch(self, patch_contents):
# FIXME: This code is brittle if the Attachment API changes.
attachment = Attachment({"bug_id": 12345}, None)
attachment.contents = lambda: patch_contents
joe_cool = Committer("Joe Cool", "[email protected]")
attachment.reviewer = lambda: joe_cool
return attachment
def _setup_webkittools_scripts_symlink(self, local_scm):
webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
webkit_scripts_directory = webkit_scm.scripts_directory()
local_scripts_directory = local_scm.scripts_directory()
os.mkdir(os.path.dirname(local_scripts_directory))
os.symlink(webkit_scripts_directory, local_scripts_directory)
# Tests which both GitTest and SVNTest should run.
# FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
def _shared_test_changed_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.changed_files(), ["test_file"])
write_into_file_at_path("test_dir/test_file3", "new stuff")
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
old_cwd = os.getcwd()
os.chdir("test_dir")
# Validate that changed_files does not change with our cwd, see bug 37015.
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
os.chdir(old_cwd)
def _shared_test_added_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.added_files(), [])
write_into_file_at_path("added_file", "new stuff")
self.scm.add("added_file")
write_into_file_at_path("added_file3", "more new stuff")
write_into_file_at_path("added_file4", "more new stuff")
self.scm.add_list(["added_file3", "added_file4"])
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file2", "new stuff")
self.scm.add("added_dir")
# SVN reports directory changes, Git does not.
added_files = self.scm.added_files()
if "added_dir" in added_files:
added_files.remove("added_dir")
self.assertItemsEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
# Test also to make sure discard_working_directory_changes removes added files
self.scm.discard_working_directory_changes()
self.assertItemsEqual(self.scm.added_files(), [])
self.assertFalse(os.path.exists("added_file"))
self.assertFalse(os.path.exists("added_file3"))
self.assertFalse(os.path.exists("added_file4"))
self.assertFalse(os.path.exists("added_dir"))
def _shared_test_changed_files_for_revision(self):
# SVN reports directory changes, Git does not.
changed_files = self.scm.changed_files_for_revision(3)
if "test_dir" in changed_files:
changed_files.remove("test_dir")
self.assertItemsEqual(changed_files, ["test_dir/test_file3", "test_file"])
self.assertItemsEqual(self.scm.changed_files_for_revision(4), ["test_file", "test_file2"]) # Git and SVN return different orders.
self.assertItemsEqual(self.scm.changed_files_for_revision(2), ["test_file"])
def _shared_test_contents_at_revision(self):
self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")
# Verify that contents_at_revision returns a byte array, aka str():
self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")
# Files which don't exist:
# Currently we raise instead of returning None because detecting the difference between
# "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)
def _shared_test_revisions_changing_file(self):
self.assertItemsEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")
def _shared_test_committer_email_for_revision(self):
self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser()) # Committer "email" will be the current user
def _shared_test_reverse_diff(self):
self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
# Only test the simple case, as any other will end up with conflict markers.
self.scm.apply_reverse_diff('5')
self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
def _shared_test_diff_for_revision(self):
# Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
r3_patch = self.scm.diff_for_revision(4)
self.assertRegexpMatches(r3_patch, 'test3')
self.assertNotRegexpMatches(r3_patch, 'test4')
self.assertRegexpMatches(r3_patch, 'test2')
self.assertRegexpMatches(self.scm.diff_for_revision(3), 'test2')
def _shared_test_svn_apply_git_patch(self):
self._setup_webkittools_scripts_symlink(self.scm)
git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
new file mode 100644
index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
60151690
GIT binary patch
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
literal 0
HcmV?d00001
"""
self.checkout.apply_patch(self._create_patch(git_binary_addition))
added = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual(512, len(added))
self.assertTrue(added.startswith('GIF89a'))
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# The file already exists.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
GIT binary patch
literal 7
OcmYex&reD$;sO8*F9L)B
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
"""
self.checkout.apply_patch(self._create_patch(git_binary_modification))
modified = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual('foobar\n', modified)
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# Applying the same modification should fail.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
deleted file mode 100644
index 323fae0..0000000
GIT binary patch
literal 0
HcmV?d00001
literal 7
OcmYex&reD$;sO8*F9L)B
"""
self.checkout.apply_patch(self._create_patch(git_binary_deletion))
self.assertFalse(os.path.exists('fizzbuzz7.gif'))
self.assertNotIn('fizzbuzz7.gif', self.scm.changed_files())
# Cannot delete again.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))
def _shared_test_add_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
def _shared_test_delete_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertNotIn("added_dir", self.scm.added_files())
def _shared_test_delete_recursively_or_not(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
write_into_file_at_path("added_dir/another_added_file", "more new stuff")
self.scm.add("added_dir/added_file")
self.scm.add("added_dir/another_added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.assertIn("added_dir/another_added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertIn("added_dir/another_added_file", self.scm.added_files())
def _shared_test_exists(self, scm, commit_function):
os.chdir(scm.checkout_root)
self.assertFalse(scm.exists('foo.txt'))
write_into_file_at_path('foo.txt', 'some stuff')
self.assertFalse(scm.exists('foo.txt'))
scm.add('foo.txt')
commit_function('adding foo')
self.assertTrue(scm.exists('foo.txt'))
scm.delete('foo.txt')
commit_function('deleting foo')
self.assertFalse(scm.exists('foo.txt'))
def _shared_test_head_svn_revision(self):
self.assertEqual(self.scm.head_svn_revision(), '5')
# Context manager that overrides the current timezone.
class TimezoneOverride(object):
def __init__(self, timezone_string):
self._timezone_string = timezone_string
def __enter__(self):
if hasattr(time, 'tzset'):
self._saved_timezone = os.environ.get('TZ', None)
os.environ['TZ'] = self._timezone_string
time.tzset()
def __exit__(self, type, value, traceback):
if hasattr(time, 'tzset'):
if self._saved_timezone:
os.environ['TZ'] = self._saved_timezone
else:
del os.environ['TZ']
time.tzset()
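# Minimal usage sketch for TimezoneOverride (illustrative; the tests below use it
# to pin svn-apply's ChangeLog dates to Cupertino time):
#
#     with TimezoneOverride('PST8PDT'):
#         cupertino_today = date.today().isoformat()
#
# On platforms without time.tzset() (e.g. Windows) the override is a no-op.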
class SVNTest(SCMTest):
@staticmethod
def _set_date_and_reviewer(changelog_entry):
# Joe Cool matches the reviewer set in SCMTest._create_patch
changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
# svn-apply will update ChangeLog entries with today's date (as in Cupertino, CA, US)
with TimezoneOverride('PST8PDT'):
return changelog_entry.replace('DATE_HERE', date.today().isoformat())
def test_svn_apply(self):
first_entry = """2009-10-26 Eric Seidel <[email protected]>
Reviewed by Foo Bar.
Most awesome change ever.
* scm_unittest.py:
"""
intermediate_entry = """2009-10-27 Eric Seidel <[email protected]>
Reviewed by Baz Bar.
A more awesomer change yet!
* scm_unittest.py:
"""
one_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -1,5 +1,13 @@
2009-10-26 Eric Seidel <[email protected]>
%(whitespace)s
+ Reviewed by NOBODY (OOPS!).
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <[email protected]>
+
Reviewed by Foo Bar.
%(whitespace)s
Most awesome change ever.
""" % {'whitespace': ' '}
one_line_overlap_entry = """DATE_HERE Eric Seidel <[email protected]>
Reviewed by REVIEWER_HERE.
Second most awesome change ever.
* scm_unittest.py:
"""
two_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -2,6 +2,14 @@
%(whitespace)s
Reviewed by Foo Bar.
%(whitespace)s
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <[email protected]>
+
+ Reviewed by Foo Bar.
+
Most awesome change ever.
%(whitespace)s
* scm_unittest.py:
""" % {'whitespace': ' '}
two_line_overlap_entry = """DATE_HERE Eric Seidel <[email protected]>
Reviewed by Foo Bar.
Second most awesome change ever.
* scm_unittest.py:
"""
write_into_file_at_path('ChangeLog', first_entry)
run_command(['svn', 'add', 'ChangeLog'])
run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
# Patch files were created against just 'first_entry'.
# Add a second commit to make svn-apply have to apply the patches with fuzz.
changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
write_into_file_at_path('ChangeLog', changelog_contents)
run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
self.scm.revert_files(['ChangeLog'])
self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
def setUp(self):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
self.scm = detect_scm_system(self.svn_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
def test_detect_scm_system_relative_url(self):
scm = detect_scm_system(".")
# I wanted to assert that we got the right path, but there was some
# crazy magic with temp folder names that I couldn't figure out.
self.assertTrue(scm.checkout_root)
def test_create_patch_is_full_patch(self):
test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
os.mkdir(test_dir_path)
test_file_path = os.path.join(test_dir_path, 'test_file2')
write_into_file_at_path(test_file_path, 'test content')
run_command(['svn', 'add', 'test_dir2'])
# create_patch depends on 'svn-create-patch', so make a dummy version.
scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
os.makedirs(scripts_path)
create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo accept -n.
os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
# Change into our test directory and run the create_patch command.
os.chdir(test_dir_path)
scm = detect_scm_system(test_dir_path)
self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
patch_contents = scm.create_patch()
# Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
def test_detection(self):
self.assertEqual(self.scm.display_name(), "svn")
self.assertEqual(self.scm.supports_local_commits(), False)
def test_apply_small_binary_patch(self):
patch_contents = """Index: test_file.swf
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: test_file.swf
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
"""
expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
self._setup_webkittools_scripts_symlink(self.scm)
patch_file = self._create_patch(patch_contents)
self.checkout.apply_patch(patch_file)
actual_contents = read_from_path("test_file.swf", encoding=None)
self.assertEqual(actual_contents, expected_contents)
def test_apply_svn_patch(self):
patch = self._create_patch(_svn_diff("-r5:4"))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_logs(self):
# Commits have dates and usernames in them, so we can't just direct compare.
self.assertRegexpMatches(self.scm.last_svn_commit_log(), 'fourth commit')
self.assertRegexpMatches(self.scm.svn_commit_log(3), 'second commit')
def _shared_test_commit_with_message(self, username=None):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit", username)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_in_subdir(self, username=None):
write_into_file_at_path('test_dir/test_file3', 'more test content')
os.chdir("test_dir")
commit_text = self.scm.commit_with_message("another test commit", username)
os.chdir("..")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_text_parsing(self):
self._shared_test_commit_with_message()
def test_commit_with_username(self):
self._shared_test_commit_with_message("[email protected]")
def test_commit_without_authorization(self):
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=111669
        # This test ends up looking in the actual $HOME/.subversion for authorization,
# which makes it fragile. For now, set it to use a realm that won't be authorized,
# but we should really plumb through a fake_home_dir here like we do in
# test_has_authorization_for_realm.
self.scm.svn_server_realm = '<http://svn.example.com:80> Example'
self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)
def test_has_authorization_for_realm_using_credentials_with_passtype(self):
credentials = """
K 8
passtype
V 8
keychain
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_has_authorization_for_realm_using_credentials_with_password(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
K 8
password
V 4
blah
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
write_into_file_at_path(fake_webkit_auth_file, credentials)
result = self.scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
os.remove(fake_webkit_auth_file)
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
return result
def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
END
"""
self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
self.assertFalse(self.scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_added_files(self):
self._shared_test_added_files()
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
os.chdir(self.svn_checkout_path)
self.scm.delete("test_file")
self.assertIn("test_file", self.scm.deleted_files())
def test_delete_list(self):
os.chdir(self.svn_checkout_path)
self.scm.delete_list(["test_file", "test_file2"])
self.assertIn("test_file", self.scm.deleted_files())
self.assertIn("test_file2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_propset_propget(self):
filepath = os.path.join(self.svn_checkout_path, "test_file")
expected_mime_type = "x-application/foo-bar"
self.scm.propset("svn:mime-type", expected_mime_type, filepath)
self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))
def test_show_head(self):
write_into_file_at_path("test_file", u"Hello!", "utf-8")
SVNTestRepository._svn_commit("fourth commit")
self.assertEqual("Hello!", self.scm.show_head('test_file'))
def test_show_head_binary(self):
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def do_test_diff_for_file(self):
write_into_file_at_path('test_file', 'some content')
self.scm.commit_with_message("a test commit")
diff = self.scm.diff_for_file('test_file')
self.assertEqual(diff, "")
write_into_file_at_path("test_file", "changed content")
diff = self.scm.diff_for_file('test_file')
self.assertIn("-some content", diff)
self.assertIn("+changed content", diff)
def clean_bogus_dir(self):
self.bogus_dir = self.scm._bogus_dir_name()
if os.path.exists(self.bogus_dir):
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_existing_bogus_dir(self):
self.clean_bogus_dir()
os.mkdir(self.bogus_dir)
self.do_test_diff_for_file()
self.assertTrue(os.path.exists(self.bogus_dir))
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_missing_bogus_dir(self):
self.clean_bogus_dir()
self.do_test_diff_for_file()
self.assertFalse(os.path.exists(self.bogus_dir))
def test_svn_lock(self):
if self.scm.svn_version() >= "1.7":
# the following technique with .svn/lock then svn update doesn't work with subversion client 1.7 or later
pass
else:
svn_root_lock_path = ".svn/lock"
write_into_file_at_path(svn_root_lock_path, "", "utf-8")
# webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
self.assertRaises(ScriptError, run_command, ['svn', 'update'])
self.scm.discard_working_directory_changes()
self.assertFalse(os.path.exists(svn_root_lock_path))
run_command(['svn', 'update']) # Should succeed and not raise.
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_with_message)
class GitTest(SCMTest):
def setUp(self):
"""Sets up fresh git repository with one commit. Then setups a second git
repo that tracks the first one."""
# FIXME: We should instead clone a git repo that is tracking an SVN repo.
# That better matches what we do with WebKit.
self.original_dir = os.getcwd()
self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
run_command(['git', 'init', self.untracking_checkout_path])
os.chdir(self.untracking_checkout_path)
write_into_file_at_path('foo_file', 'foo')
run_command(['git', 'add', 'foo_file'])
run_command(['git', 'commit', '-am', 'dummy commit'])
self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
os.chdir(self.tracking_git_checkout_path)
self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
def tearDown(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.tracking_git_checkout_path])
run_command(['rm', '-rf', self.untracking_checkout_path])
def test_remote_branch_ref(self):
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
os.chdir(self.untracking_checkout_path)
self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)
def test_multiple_remotes(self):
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')
def test_create_patch(self):
write_into_file_at_path('test_file_commit1', 'contents')
run_command(['git', 'add', 'test_file_commit1'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'Subversion Revision:')
def test_orderfile(self):
os.mkdir("Tools")
os.mkdir("Source")
os.mkdir("LayoutTests")
os.mkdir("Websites")
# Slash should always be the right path separator since we use cygwin on Windows.
Tools_ChangeLog = "Tools/ChangeLog"
write_into_file_at_path(Tools_ChangeLog, "contents")
Source_ChangeLog = "Source/ChangeLog"
write_into_file_at_path(Source_ChangeLog, "contents")
LayoutTests_ChangeLog = "LayoutTests/ChangeLog"
write_into_file_at_path(LayoutTests_ChangeLog, "contents")
Websites_ChangeLog = "Websites/ChangeLog"
write_into_file_at_path(Websites_ChangeLog, "contents")
Tools_ChangeFile = "Tools/ChangeFile"
write_into_file_at_path(Tools_ChangeFile, "contents")
Source_ChangeFile = "Source/ChangeFile"
write_into_file_at_path(Source_ChangeFile, "contents")
LayoutTests_ChangeFile = "LayoutTests/ChangeFile"
write_into_file_at_path(LayoutTests_ChangeFile, "contents")
Websites_ChangeFile = "Websites/ChangeFile"
write_into_file_at_path(Websites_ChangeFile, "contents")
run_command(['git', 'add', 'Tools/ChangeLog'])
run_command(['git', 'add', 'LayoutTests/ChangeLog'])
run_command(['git', 'add', 'Source/ChangeLog'])
run_command(['git', 'add', 'Websites/ChangeLog'])
run_command(['git', 'add', 'Tools/ChangeFile'])
run_command(['git', 'add', 'LayoutTests/ChangeFile'])
run_command(['git', 'add', 'Source/ChangeFile'])
run_command(['git', 'add', 'Websites/ChangeFile'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'Tools/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'Websites/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'Source/ChangeFile', patch).start())
self.assertTrue(re.search(r'LayoutTests/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
def test_exists(self):
scm = self.untracking_scm
self._shared_test_exists(scm, scm.commit_locally_with_message)
def test_head_svn_revision(self):
scm = detect_scm_system(self.untracking_checkout_path)
# If we cloned a git repo tracking an SVN repo, this would give the same result as
# self._shared_test_head_svn_revision().
self.assertEqual(scm.head_svn_revision(), '')
def test_rename_files(self):
scm = self.tracking_scm
run_command(['git', 'mv', 'foo_file', 'bar_file'])
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'rename from ')
self.assertNotRegexpMatches(patch, r'rename to ')
class GitSVNTest(SCMTest):
def _setup_git_checkout(self):
self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
# --quiet doesn't make git svn silent, so we use run_silent to redirect output
run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
os.chdir(self.git_checkout_path)
def _tear_down_git_checkout(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.git_checkout_path])
def setUp(self):
self.original_dir = os.getcwd()
SVNTestRepository.setup(self)
self._setup_git_checkout()
self.scm = detect_scm_system(self.git_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
self._tear_down_git_checkout()
def test_detection(self):
self.assertEqual(self.scm.display_name(), "git")
self.assertEqual(self.scm.supports_local_commits(), True)
def test_read_git_config(self):
key = 'test.git-config'
value = 'git-config value'
run_command(['git', 'config', key, value])
self.assertEqual(self.scm.read_git_config(key), value)
def test_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
def test_discard_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
self.scm.discard_local_commits()
self.assertEqual(len(self.scm.local_commits()), 0)
def test_delete_branch(self):
new_branch = 'foo'
run_command(['git', 'checkout', '-b', new_branch])
self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
run_command(['git', 'checkout', '-b', 'bar'])
self.scm.delete_branch(new_branch)
self.assertNotRegexpMatches(run_command(['git', 'branch']), r'foo')
def test_remote_merge_base(self):
# Diff to merge-base should include working-copy changes,
# which the diff to svn_branch.. doesn't.
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
self.assertNotRegexpMatches(diff_to_common_base, r'foo')
self.assertRegexpMatches(diff_to_merge_base, r'foo')
def test_rebase_in_progress(self):
svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
write_into_file_at_path(svn_test_file, "svn_checkout")
run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
git_test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(git_test_file, "git_checkout")
run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
# --quiet doesn't make git svn silent, so use run_silent to redirect output
self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
self.assertTrue(self.scm.rebase_in_progress())
# Make sure our cleanup works.
self.scm.discard_working_directory_changes()
self.assertFalse(self.scm.rebase_in_progress())
# Make sure cleanup doesn't throw when no rebase is in progress.
self.scm.discard_working_directory_changes()
def test_commitish_parsing(self):
# Multiple revisions are cherry-picked.
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
# ... is an invalid range specifier
self.assertRaises(ScriptError, self.scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
def test_commitish_order(self):
commit_range = 'HEAD~3..HEAD'
actual_commits = self.scm.commit_ids_from_commitish_arguments([commit_range])
expected_commits = []
expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
self.assertEqual(actual_commits, expected_commits)
def test_apply_git_patch(self):
        # We carefully pick a diff which does not have a directory addition
# as currently svn-apply will error out when trying to remove directories
# in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
patch = self._create_patch(_git_diff('HEAD..HEAD^'))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_text_parsing(self):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_with_message_working_copy_only(self):
write_into_file_at_path('test_file_commit1', 'more test content')
run_command(['git', 'add', 'test_file_commit1'])
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def _local_commit(self, filename, contents, message):
write_into_file_at_path(filename, contents)
run_command(['git', 'add', filename])
self.scm.commit_locally_with_message(message)
def _one_local_commit(self):
self._local_commit('test_file_commit1', 'more test content', 'another test commit')
def _one_local_commit_plus_working_copy_changes(self):
self._one_local_commit()
write_into_file_at_path('test_file_commit2', 'still more test content')
run_command(['git', 'add', 'test_file_commit2'])
def _second_local_commit(self):
self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
def _two_local_commits(self):
self._one_local_commit()
self._second_local_commit()
def _three_local_commits(self):
self._local_commit('test_file_commit0', 'more test content', 'another test commit')
self._two_local_commits()
def test_revisions_changing_files_with_local_commit(self):
self._one_local_commit()
self.assertItemsEqual(self.scm.revisions_changing_file('test_file_commit1'), [])
def test_commit_with_message(self):
self._one_local_commit_plus_working_copy_changes()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit(self):
self._two_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD^")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertNotRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_git_commit_range(self):
self._three_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file_commit0')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_only_local_commit(self):
self._one_local_commit()
commit_text = self.scm.commit_with_message("another test commit")
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", git_commit="HEAD^")
def test_commit_with_message_multiple_local_commits_always_squash(self):
run_command(['git', 'config', 'webkit-patch.commit-should-always-squash', 'true'])
self._two_local_commits()
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits(self):
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file2')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced_with_conflict(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._local_commit('test_file2', 'asdf', 'asdf commit')
# There's a conflict between trunk and the test_file2 modification.
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", force_squash=True)
def test_upstream_branch(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self.assertEqual(self.scm._upstream_branch(), 'my-branch')
def test_remote_branch_ref(self):
self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_after_merge(self):
run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
run_command(['git', 'merge', 'trunk'])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(changed_files=['test_file_commit2'])
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
os.remove('test_file_commit1')
patch = self.scm.create_patch()
patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEqual(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
patch = self.scm.create_patch(git_commit="HEAD^")
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertNotRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_git_commit_range(self):
self._three_local_commits()
patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
self.assertNotRegexpMatches(patch, r'test_file_commit0')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(git_commit="HEAD....")
self.assertNotRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
patch = self.scm.create_patch()
self.assertNotRegexpMatches(patch, r'test_file2')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_binary_patch(self):
# Create a git binary patch and check the contents.
test_file_name = 'binary_file'
test_file_path = os.path.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'\nliteral 0\n')
self.assertRegexpMatches(patch, r'\nliteral 256\n')
# Check if we can apply the created patch.
run_command(['git', 'rm', '-f', test_file_name])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(patch))
self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
# Check if we can create a patch from a local commit.
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
run_command(['git', 'commit', '-m', 'binary diff'])
patch_from_local_commit = self.scm.create_patch('HEAD')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files()
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
# working copy should *not* be in the list.
files = self.scm.changed_files('trunk..')
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
# working copy *should* be in the list.
files = self.scm.changed_files('trunk....')
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_git_commit(self):
self._two_local_commits()
files = self.scm.changed_files(git_commit="HEAD^")
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
self.assertNotIn('test_file_commit0', files)
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files(git_commit="HEAD....")
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
files = self.scm.changed_files()
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
files = self.scm.changed_files()
self.assertNotIn('test_file2', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_changed_files_upstream(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
self._one_local_commit()
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self._second_local_commit()
write_into_file_at_path('test_file_commit0', 'more test content')
run_command(['git', 'add', 'test_file_commit0'])
        # equivalent to 'git diff my-branch..HEAD', should not include working changes
files = self.scm.changed_files(git_commit='UPSTREAM..')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertNotIn('test_file_commit0', files)
# equivalent to 'git diff my-branch', *should* include working changes
files = self.scm.changed_files(git_commit='UPSTREAM....')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit0', files)
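        # Note on the range syntax exercised by these tests (summarizing the
        # comments above; illustrative only): a two-dot range such as
        # 'UPSTREAM..' excludes uncommitted working copy changes, while the
        # four-dot form 'UPSTREAM....' includes them, e.g.
        #   self.scm.changed_files(git_commit='trunk..')    # committed changes only
        #   self.scm.changed_files(git_commit='trunk....')  # plus working copy edits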
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_added_files(self):
self._shared_test_added_files()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
self.assertIn("test_file_commit1", self.scm.deleted_files())
def test_delete_list(self):
self._two_local_commits()
self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
self.assertIn("test_file_commit1", self.scm.deleted_files())
self.assertIn("test_file_commit2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_to_object_name(self):
relpath = 'test_file_commit1'
fullpath = os.path.realpath(os.path.join(self.git_checkout_path, relpath))
self.assertEqual(relpath, self.scm.to_object_name(fullpath))
def test_show_head(self):
self._two_local_commits()
self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))
def test_show_head_binary(self):
self._two_local_commits()
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_locally_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def test_diff_for_file(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
diff = self.scm.diff_for_file('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", diff)
self.assertIn("-more test content", diff)
self.scm.add('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", cached_diff)
self.assertIn("-more test content", cached_diff)
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
# We need to split off more of these SCM tests to use mocks instead of the filesystem.
# This class is the first part of that.
class GitTestWithMock(unittest.TestCase):
maxDiff = None
def make_scm(self, logging_executive=False):
# We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
scm._executive._should_log = logging_executive
return scm
def test_create_patch(self):
scm = self.make_scm(logging_executive=True)
expected_stderr = """\
MOCK run_command: ['git', 'merge-base', 'MOCKVALUE', 'HEAD'], cwd=%(checkout)s
MOCK run_command: ['git', 'diff', '--binary', '--no-color', '--no-ext-diff', '--full-index', '--no-renames', '', 'MOCK output of child process', '--'], cwd=%(checkout)s
MOCK run_command: ['git', 'rev-parse', '--show-toplevel'], cwd=%(checkout)s
MOCK run_command: ['git', 'log', '-1', '--grep=git-svn-id:', '--date=iso', './MOCK output of child process/MOCK output of child process'], cwd=%(checkout)s
""" % {'checkout': scm.checkout_root}
OutputCapture().assert_outputs(self, scm.create_patch, expected_logs=expected_stderr)
def test_push_local_commits_to_server_with_username_and_password(self):
self.assertEqual(self.make_scm().push_local_commits_to_server(username='[email protected]', password='blah'), "MOCK output of child process")
def test_push_local_commits_to_server_without_username_and_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
def test_push_local_commits_to_server_with_username_and_without_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': '[email protected]'})
def test_push_local_commits_to_server_without_username_and_with_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
def test_timestamp_of_revision(self):
scm = self.make_scm()
scm.find_checkout_root = lambda path: ''
scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
| bsd-3-clause |
tntnatbry/tensorflow | tensorflow/python/layers/base.py | 8 | 12144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the base Layer class, from which all layers inherit.
This is a private class and its internal implementation is subject to changes
in the future.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import variable_scope as vs
class _Layer(object):
"""Base layer class.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
This is the class from which all layers inherit, implementing common
infrastructure functionality.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing variables,
losses, and updates, as well as applying TensorFlow ops to input tensors.
Properties:
trainable: Whether the layer should be trained (boolean).
name: The name of the layer (string).
dtype: Default dtype of the layer (dtypes.float32).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
"""
def __init__(self, trainable=True, name=None,
dtype=dtypes.float32, **kwargs):
# We use a kwargs dict here because these kwargs only exist
# for compatibility reasons.
# The list of kwargs is subject to changes in the future.
# We do not want to commit to it or to expose the list to users at all.
# Note this is exactly as safe as defining kwargs in the function signature,
# the only difference being that the list of valid kwargs is defined
    # below rather than in the signature, and default values are defined
# in calls to kwargs.get().
allowed_kwargs = {
'_scope',
'_reuse',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
self._trainable = trainable
self._built = False
self._trainable_variables = []
self._non_trainable_variables = []
self._updates = []
self._losses = []
self._reuse = kwargs.get('_reuse')
self.dtype = dtype
# Determine base name (non-unique).
base_name = name
if not name:
base_name = _to_snake_case(self.__class__.__name__)
# Determine variable scope.
scope = kwargs.get('_scope')
if scope:
self._scope = next(vs.variable_scope(scope).gen)
else:
self._scope = next(vs.variable_scope(None, default_name=base_name).gen)
# Unique name is borrowed from scope to match variable names.
self.name = self._scope.name
def __setattr__(self, name, value):
if hasattr(self, name):
# Only allow private attributes to be set more than once, under the
# convention that private attributes should only be set from inside
# the class.
      # All attributes meant to be set several times should be made private.
if name[0] != '_':
raise AttributeError('Read-only property cannot be set: %s' % name)
super(_Layer, self).__setattr__(name, value)
@property
def trainable_variables(self):
return self._trainable_variables if self.trainable else []
@property
def non_trainable_variables(self):
return self._non_trainable_variables if self.trainable else self.variables
@property
def trainable_weights(self):
return self.trainable_variables
@property
def non_trainable_weights(self):
return self.non_trainable_variables
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self._trainable_variables + self._non_trainable_variables
@property
def updates(self):
return self._updates
@property
def losses(self):
return self._losses
@property
def built(self):
return self._built
@property
def trainable(self):
return self._trainable
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.variables
def build(self, _):
"""Creates the variables of the layer.
"""
self._built = True
def call(self, inputs, **kwargs):
"""The logic of the layer lives here.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments.
Returns:
Output tensor(s).
"""
raise NotImplementedError
def _compute_output_shape(self, input_shape):
"""Computes the output shape of the layer given the input shape.
Assumes that the layer will be built to match that input shape.
Args:
input_shape: A (possibly nested tuple of) `TensorShape`. It need not
be fully defined (e.g. the batch size may be unknown).
Returns:
A (possibly nested tuple of) `TensorShape`.
Raises:
TypeError: if `input_shape` is not a (possibly nested tuple of)
`TensorShape`.
      ValueError: if `input_shape` is incomplete or is incompatible with the
        layer.
"""
raise NotImplementedError
def _add_variable(self, name, shape, dtype=None,
initializer=None, regularizer=None, trainable=True,
variable_getter=vs.get_variable):
"""Adds a new variable to the layer.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
variable_getter: The getter to use for TensorFlow variables.
Returns:
The created variable.
"""
if dtype is None:
dtype = self.dtype
existing_variables = set(tf_variables.global_variables())
variable = variable_getter(name,
shape=shape,
initializer=initializer,
dtype=dtype,
trainable=trainable and self.trainable)
# TODO(sguada) fix name = variable.op.name
if variable in existing_variables:
return variable
if regularizer:
# To match the behavior of tf.get_variable(), we only
# apply regularization if the variable is newly created.
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
else:
with ops.colocate_with(variable.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(variable)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
if trainable:
self._trainable_variables.append(variable)
else:
self._non_trainable_variables.append(variable)
return variable
def __call__(self, inputs, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
# Define a custom getter to override tf.get_variable when creating layer
# variables. The current custom getter is nested by the variable scope.
def variable_getter(getter, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, **kwargs):
return self._add_variable(
name, shape, initializer=initializer, regularizer=regularizer,
dtype=dtype, trainable=trainable,
variable_getter=functools.partial(getter, **kwargs))
# Build (if necessary) and call the layer, inside a variable scope.
with vs.variable_scope(self._scope,
reuse=True if self._built else self._reuse,
custom_getter=variable_getter) as scope:
with ops.name_scope(scope.original_name_scope):
if not self.built:
input_list = _to_list(inputs)
input_shapes = [x.get_shape() for x in input_list]
if len(input_shapes) == 1:
self.build(input_shapes[0])
else:
self.build(input_shapes)
self._built = True
outputs = self.call(inputs, **kwargs)
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
output_list = _to_list(outputs)
for output in output_list:
with ops.name_scope('ActivityRegularizer'):
activity_regularization = self.activity_regularizer(output)
self._losses.append(activity_regularization)
_add_elements_to_collection(
activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def apply(self, inputs, **kwargs):
"""Apply the layer on a input.
This simply wraps `self.__call__`.
Arguments:
inputs: Input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, **kwargs)
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
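# Illustrative only, following the regexes above: for example
# _to_snake_case('BatchNorm') returns 'batch_norm', and names starting with
# '_' additionally receive the 'private' prefix.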
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collections):
elements = _to_list(elements)
collections = _to_list(collections)
for name in collections:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
| apache-2.0 |
keyurpatel076/MissionPlannerGit | packages/IronPython.StdLib.2.7.5-beta1/content/Lib/sre_constants.py | 349 | 7137 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
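# Illustrative only: makedict maps each item to its position in the list,
# e.g. makedict([FAILURE, SUCCESS]) == {FAILURE: 0, SUCCESS: 1}.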
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print "done"
| gpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/webrtc/tools/compare_videos.py | 13 | 5925 | #!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Chrome browsertests will throw away stderr; redirect it to stdout so the output is not lost.
sys.stderr = sys.stdout
def _ParseArgs():
"""Registers the command-line options."""
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--label', type='string', default='MY_TEST',
help=('Label of the test, used to identify different '
'tests. Default: %default'))
parser.add_option('--ref_video', type='string',
help='Reference video to compare with (YUV).')
parser.add_option('--test_video', type='string',
help=('Test video to be compared with the reference '
'video (YUV).'))
parser.add_option('--frame_analyzer', type='string',
help='Path to the frame analyzer executable.')
parser.add_option('--barcode_decoder', type='string',
help=('Path to the barcode decoder script. By default, we '
'will assume we can find it in barcode_tools/'
'relative to this directory.'))
parser.add_option('--ffmpeg_path', type='string',
help=('The path to where the ffmpeg executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name ffmpeg[.exe].'))
parser.add_option('--zxing_path', type='string',
help=('The path to where the zxing executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name zxing[.exe].'))
parser.add_option('--stats_file', type='string', default='stats.txt',
help=('Path to the temporary stats file to be created and '
'used. Default: %default'))
parser.add_option('--yuv_frame_width', type='int', default=640,
help='Width of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_frame_height', type='int', default=480,
help='Height of the YUV file\'s frames. Default: %default')
options, _ = parser.parse_args()
if not options.ref_video:
parser.error('You must provide a path to the reference video!')
if not os.path.exists(options.ref_video):
parser.error('Cannot find the reference video at %s' % options.ref_video)
if not options.test_video:
parser.error('You must provide a path to the test video!')
if not os.path.exists(options.test_video):
parser.error('Cannot find the test video at %s' % options.test_video)
if not options.frame_analyzer:
parser.error('You must provide the path to the frame analyzer executable!')
if not os.path.exists(options.frame_analyzer):
parser.error('Cannot find frame analyzer executable at %s!' %
options.frame_analyzer)
return options
def main():
"""The main function.
A simple invocation is:
./webrtc/tools/barcode_tools/compare_videos.py
--ref_video=<path_and_name_of_reference_video>
--test_video=<path_and_name_of_test_video>
--frame_analyzer=<path_and_name_of_the_frame_analyzer_executable>
  Notice that the prerequisites for barcode_decoder.py also apply to this
  script. This means the following executables have to be available in the PATH:
* zxing
* ffmpeg
"""
options = _ParseArgs()
if options.barcode_decoder:
path_to_decoder = options.barcode_decoder
else:
path_to_decoder = os.path.join(SCRIPT_DIR, 'barcode_tools',
'barcode_decoder.py')
# On Windows, sometimes the inherited stdin handle from the parent process
  # fails. Work around this by passing a null file handle as stdin to the subprocesses.
null_filehandle = open(os.devnull, 'r')
# Run barcode decoder on the test video to identify frame numbers.
png_working_directory = tempfile.mkdtemp()
cmd = [
sys.executable,
path_to_decoder,
'--yuv_file=%s' % options.test_video,
'--yuv_frame_width=%d' % options.yuv_frame_width,
'--yuv_frame_height=%d' % options.yuv_frame_height,
'--stats_file=%s' % options.stats_file,
'--png_working_dir=%s' % png_working_directory,
]
if options.zxing_path:
cmd.append('--zxing_path=%s' % options.zxing_path)
if options.ffmpeg_path:
cmd.append('--ffmpeg_path=%s' % options.ffmpeg_path)
barcode_decoder = subprocess.Popen(cmd, stdin=null_filehandle,
stdout=sys.stdout, stderr=sys.stderr)
barcode_decoder.wait()
shutil.rmtree(png_working_directory)
if barcode_decoder.returncode != 0:
print 'Failed to run barcode decoder script.'
return 1
# Run frame analyzer to compare the videos and print output.
cmd = [
options.frame_analyzer,
'--label=%s' % options.label,
'--reference_file=%s' % options.ref_video,
'--test_file=%s' % options.test_video,
'--stats_file=%s' % options.stats_file,
'--width=%d' % options.yuv_frame_width,
'--height=%d' % options.yuv_frame_height,
]
frame_analyzer = subprocess.Popen(cmd, stdin=null_filehandle,
stdout=sys.stdout, stderr=sys.stderr)
frame_analyzer.wait()
if frame_analyzer.returncode != 0:
print 'Failed to run frame analyzer.'
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
StefanRijnhart/OpenUpgrade | addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ChromiumWebApps/chromium | chrome/test/mini_installer/registry_verifier.py | 44 | 4287 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import _winreg
import verifier
class RegistryVerifier(verifier.Verifier):
"""Verifies that the current registry matches the specified criteria."""
def _RootKeyConstant(self, root_key):
"""Converts a root registry key string into a _winreg.HKEY_* constant."""
root_key_mapping = {
'HKEY_CLASSES_ROOT': _winreg.HKEY_CLASSES_ROOT,
'HKEY_CURRENT_USER': _winreg.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': _winreg.HKEY_LOCAL_MACHINE,
'HKEY_USERS': _winreg.HKEY_USERS,
}
if root_key not in root_key_mapping:
raise KeyError("Unknown root registry key '%s'" % root_key)
return root_key_mapping[root_key]
def _ValueTypeConstant(self, value_type):
"""Converts a registry value type string into a _winreg.REG_* constant."""
value_type_mapping = {
'BINARY': _winreg.REG_BINARY,
'DWORD': _winreg.REG_DWORD,
'DWORD_LITTLE_ENDIAN': _winreg.REG_DWORD_LITTLE_ENDIAN,
'DWORD_BIG_ENDIAN': _winreg.REG_DWORD_BIG_ENDIAN,
'EXPAND_SZ': _winreg.REG_EXPAND_SZ,
'LINK': _winreg.REG_LINK,
'MULTI_SZ': _winreg.REG_MULTI_SZ,
'NONE': _winreg.REG_NONE,
'SZ': _winreg.REG_SZ,
}
if value_type not in value_type_mapping:
raise KeyError("Unknown registry value type '%s'" % value_type)
return value_type_mapping[value_type]
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Overridden from verifier.Verifier.
Verifies a registry key according to the |expectation|.
Args:
expectation_name: The registry key being verified. It is expanded using
Expand.
expectation: A dictionary with the following keys and values:
'exists' a boolean indicating whether the registry key should exist.
'values' (optional) a dictionary where each key is a registry value
and its associated value is a dictionary with the following key
and values:
'type' a string indicating the type of the registry value.
'data' the associated data of the registry value. If it is a
string, it is expanded using Expand.
variable_expander: A VariableExpander object.
"""
key = variable_expander.Expand(expectation_name)
root_key, sub_key = key.split('\\', 1)
try:
# Query the Windows registry for the registry key. It will throw a
# WindowsError if the key doesn't exist.
key_handle = _winreg.OpenKey(self._RootKeyConstant(root_key), sub_key, 0,
_winreg.KEY_QUERY_VALUE)
except WindowsError:
# Key doesn't exist. See that it matches the expectation.
assert not expectation['exists'], ('Registry key %s is missing' %
key)
return
# The key exists, see that it matches the expectation.
assert expectation['exists'], ('Registry key %s exists' % key)
# Verify the expected values.
if 'values' not in expectation:
return
for value, value_expectation in expectation['values'].iteritems():
# Query the value. It will throw a WindowsError if the value doesn't
# exist.
try:
data, value_type = _winreg.QueryValueEx(key_handle, value)
except WindowsError:
raise KeyError("Value '%s' of registry key %s is missing" % (
value, key))
# Verify the type of the value.
expected_value_type = value_expectation['type']
assert self._ValueTypeConstant(expected_value_type) == value_type, \
"Value '%s' of registry key %s has unexpected type '%s'" % (
value, key, expected_value_type)
# Verify the associated data of the value.
expected_data = value_expectation['data']
if isinstance(expected_data, basestring):
expected_data = variable_expander.Expand(expected_data)
assert expected_data == data, \
("Value '%s' of registry key %s has unexpected data.\n"
" Expected: %s\n"
" Actual: %s" % (value, key, expected_data, data))
| bsd-3-clause |
crosswalk-project/chromium-crosswalk-efl | third_party/cython/src/Cython/Compiler/Parsing.py | 87 | 107990 | # cython: auto_cpdef=True, infer_types=True, language_level=3, py2_import=True
#
# Parser
#
# This should be done automatically
import cython
cython.declare(Nodes=object, ExprNodes=object, EncodedString=object,
BytesLiteral=object, StringEncoding=object,
FileSourceDescriptor=object, lookup_unicodechar=object,
Future=object, Options=object, error=object, warning=object,
Builtin=object, ModuleNode=object, Utils=object,
re=object, _unicode=object, _bytes=object)
import re
from unicodedata import lookup as lookup_unicodechar
from Cython.Compiler.Scanning import PyrexScanner, FileSourceDescriptor
import Nodes
import ExprNodes
import Builtin
import StringEncoding
from StringEncoding import EncodedString, BytesLiteral, _unicode, _bytes
from ModuleNode import ModuleNode
from Errors import error, warning
from Cython import Utils
import Future
import Options
class Ctx(object):
# Parsing context
level = 'other'
visibility = 'private'
cdef_flag = 0
typedef_flag = 0
api = 0
overridable = 0
nogil = 0
namespace = None
templates = None
allow_struct_enum_decorator = False
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __call__(self, **kwds):
ctx = Ctx()
d = ctx.__dict__
d.update(self.__dict__)
d.update(kwds)
return ctx
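        # Illustrative only: calling an existing context returns a copy with
        # the given attributes overridden, e.g. (hypothetical values)
        #   inner_ctx = ctx(level='c_class', visibility='public')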
def p_ident(s, message = "Expected an identifier"):
if s.sy == 'IDENT':
name = s.systring
s.next()
return name
else:
s.error(message)
def p_ident_list(s):
names = []
while s.sy == 'IDENT':
names.append(s.systring)
s.next()
if s.sy != ',':
break
s.next()
return names
#------------------------------------------
#
# Expressions
#
#------------------------------------------
def p_binop_operator(s):
pos = s.position()
op = s.sy
s.next()
return op, pos
def p_binop_expr(s, ops, p_sub_expr):
n1 = p_sub_expr(s)
while s.sy in ops:
op, pos = p_binop_operator(s)
n2 = p_sub_expr(s)
n1 = ExprNodes.binop_node(pos, op, n1, n2)
if op == '/':
if Future.division in s.context.future_directives:
n1.truedivision = True
else:
n1.truedivision = None # unknown
return n1
#lambdef: 'lambda' [varargslist] ':' test
def p_lambdef(s, allow_conditional=True):
# s.sy == 'lambda'
pos = s.position()
s.next()
if s.sy == ':':
args = []
star_arg = starstar_arg = None
else:
args, star_arg, starstar_arg = p_varargslist(
s, terminator=':', annotated=False)
s.expect(':')
if allow_conditional:
expr = p_test(s)
else:
expr = p_test_nocond(s)
return ExprNodes.LambdaNode(
pos, args = args,
star_arg = star_arg, starstar_arg = starstar_arg,
result_expr = expr)
#lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
def p_lambdef_nocond(s):
return p_lambdef(s, allow_conditional=False)
#test: or_test ['if' or_test 'else' test] | lambdef
def p_test(s):
if s.sy == 'lambda':
return p_lambdef(s)
pos = s.position()
expr = p_or_test(s)
if s.sy == 'if':
s.next()
test = p_or_test(s)
s.expect('else')
other = p_test(s)
return ExprNodes.CondExprNode(pos, test=test, true_val=expr, false_val=other)
else:
return expr
#test_nocond: or_test | lambdef_nocond
def p_test_nocond(s):
if s.sy == 'lambda':
return p_lambdef_nocond(s)
else:
return p_or_test(s)
#or_test: and_test ('or' and_test)*
def p_or_test(s):
return p_rassoc_binop_expr(s, ('or',), p_and_test)
def p_rassoc_binop_expr(s, ops, p_subexpr):
n1 = p_subexpr(s)
if s.sy in ops:
pos = s.position()
op = s.sy
s.next()
n2 = p_rassoc_binop_expr(s, ops, p_subexpr)
n1 = ExprNodes.binop_node(pos, op, n1, n2)
return n1
#and_test: not_test ('and' not_test)*
def p_and_test(s):
#return p_binop_expr(s, ('and',), p_not_test)
return p_rassoc_binop_expr(s, ('and',), p_not_test)
#not_test: 'not' not_test | comparison
def p_not_test(s):
if s.sy == 'not':
pos = s.position()
s.next()
return ExprNodes.NotNode(pos, operand = p_not_test(s))
else:
return p_comparison(s)
#comparison: expr (comp_op expr)*
#comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
def p_comparison(s):
n1 = p_starred_expr(s)
if s.sy in comparison_ops:
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
n1 = ExprNodes.PrimaryCmpNode(pos,
operator = op, operand1 = n1, operand2 = n2)
if s.sy in comparison_ops:
n1.cascade = p_cascaded_cmp(s)
return n1
def p_test_or_starred_expr(s):
if s.sy == '*':
return p_starred_expr(s)
else:
return p_test(s)
def p_starred_expr(s):
pos = s.position()
if s.sy == '*':
starred = True
s.next()
else:
starred = False
expr = p_bit_expr(s)
if starred:
expr = ExprNodes.StarredTargetNode(pos, expr)
return expr
def p_cascaded_cmp(s):
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
result = ExprNodes.CascadedCmpNode(pos,
operator = op, operand2 = n2)
if s.sy in comparison_ops:
result.cascade = p_cascaded_cmp(s)
return result
def p_cmp_op(s):
if s.sy == 'not':
s.next()
s.expect('in')
op = 'not_in'
elif s.sy == 'is':
s.next()
if s.sy == 'not':
s.next()
op = 'is_not'
else:
op = 'is'
else:
op = s.sy
s.next()
if op == '<>':
op = '!='
return op
comparison_ops = cython.declare(set, set([
'<', '>', '==', '>=', '<=', '<>', '!=',
'in', 'is', 'not'
]))
#expr: xor_expr ('|' xor_expr)*
def p_bit_expr(s):
return p_binop_expr(s, ('|',), p_xor_expr)
#xor_expr: and_expr ('^' and_expr)*
def p_xor_expr(s):
return p_binop_expr(s, ('^',), p_and_expr)
#and_expr: shift_expr ('&' shift_expr)*
def p_and_expr(s):
return p_binop_expr(s, ('&',), p_shift_expr)
#shift_expr: arith_expr (('<<'|'>>') arith_expr)*
def p_shift_expr(s):
return p_binop_expr(s, ('<<', '>>'), p_arith_expr)
#arith_expr: term (('+'|'-') term)*
def p_arith_expr(s):
return p_binop_expr(s, ('+', '-'), p_term)
#term: factor (('*'|'/'|'%') factor)*
def p_term(s):
return p_binop_expr(s, ('*', '/', '%', '//'), p_factor)
#factor: ('+'|'-'|'~'|'&'|typecast|sizeof) factor | power
def p_factor(s):
# little indirection for C-ification purposes
return _p_factor(s)
def _p_factor(s):
sy = s.sy
if sy in ('+', '-', '~'):
op = s.sy
pos = s.position()
s.next()
return ExprNodes.unop_node(pos, op, p_factor(s))
elif not s.in_python_file:
if sy == '&':
pos = s.position()
s.next()
arg = p_factor(s)
return ExprNodes.AmpersandNode(pos, operand = arg)
elif sy == "<":
return p_typecast(s)
elif sy == 'IDENT' and s.systring == "sizeof":
return p_sizeof(s)
return p_power(s)
def p_typecast(s):
# s.sy == "<"
pos = s.position()
s.next()
base_type = p_c_base_type(s)
is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
is_const = isinstance(base_type, Nodes.CConstTypeNode)
if (not is_memslice and not is_template and not is_const
and base_type.name is None):
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
if s.sy == '?':
s.next()
typecheck = 1
else:
typecheck = 0
s.expect(">")
operand = p_factor(s)
if is_memslice:
return ExprNodes.CythonArrayNode(pos, base_type_node=base_type,
operand=operand)
return ExprNodes.TypecastNode(pos,
base_type = base_type,
declarator = declarator,
operand = operand,
typecheck = typecheck)
def p_sizeof(s):
# s.sy == ident "sizeof"
pos = s.position()
s.next()
s.expect('(')
# Here we decide if we are looking at an expression or type
# If it is actually a type, but parsable as an expression,
# we treat it as an expression here.
if looking_at_expr(s):
operand = p_test(s)
node = ExprNodes.SizeofVarNode(pos, operand = operand)
else:
base_type = p_c_base_type(s)
declarator = p_c_declarator(s, empty = 1)
node = ExprNodes.SizeofTypeNode(pos,
base_type = base_type, declarator = declarator)
s.expect(')')
return node
def p_yield_expression(s):
# s.sy == "yield"
pos = s.position()
s.next()
is_yield_from = False
if s.sy == 'from':
is_yield_from = True
s.next()
if s.sy != ')' and s.sy not in statement_terminators:
arg = p_testlist(s)
else:
if is_yield_from:
s.error("'yield from' requires a source argument",
pos=pos, fatal=False)
arg = None
if is_yield_from:
return ExprNodes.YieldFromExprNode(pos, arg=arg)
else:
return ExprNodes.YieldExprNode(pos, arg=arg)
def p_yield_statement(s):
# s.sy == "yield"
yield_expr = p_yield_expression(s)
return Nodes.ExprStatNode(yield_expr.pos, expr=yield_expr)
#power: atom trailer* ('**' factor)*
def p_power(s):
if s.systring == 'new' and s.peek()[0] == 'IDENT':
return p_new_expr(s)
n1 = p_atom(s)
while s.sy in ('(', '[', '.'):
n1 = p_trailer(s, n1)
if s.sy == '**':
pos = s.position()
s.next()
n2 = p_factor(s)
n1 = ExprNodes.binop_node(pos, '**', n1, n2)
return n1
def p_new_expr(s):
# s.systring == 'new'.
pos = s.position()
s.next()
cppclass = p_c_base_type(s)
return p_call(s, ExprNodes.NewExprNode(pos, cppclass = cppclass))
#trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(s, node1):
pos = s.position()
if s.sy == '(':
return p_call(s, node1)
elif s.sy == '[':
return p_index(s, node1)
else: # s.sy == '.'
s.next()
name = EncodedString( p_ident(s) )
return ExprNodes.AttributeNode(pos,
obj = node1, attribute = name)
# arglist: argument (',' argument)* [',']
# argument: [test '='] test # Really [keyword '='] test
def p_call_parse_args(s, allow_genexp = True):
# s.sy == '('
pos = s.position()
s.next()
positional_args = []
keyword_args = []
star_arg = None
starstar_arg = None
while s.sy not in ('**', ')'):
if s.sy == '*':
if star_arg:
s.error("only one star-arg parameter allowed",
pos=s.position())
s.next()
star_arg = p_test(s)
else:
arg = p_test(s)
if s.sy == '=':
s.next()
if not arg.is_name:
s.error("Expected an identifier before '='",
pos=arg.pos)
encoded_name = EncodedString(arg.name)
keyword = ExprNodes.IdentifierStringNode(
arg.pos, value=encoded_name)
arg = p_test(s)
keyword_args.append((keyword, arg))
else:
if keyword_args:
s.error("Non-keyword arg following keyword arg",
pos=arg.pos)
if star_arg:
s.error("Non-keyword arg following star-arg",
pos=arg.pos)
positional_args.append(arg)
if s.sy != ',':
break
s.next()
if s.sy == 'for':
if len(positional_args) == 1 and not star_arg:
positional_args = [ p_genexp(s, positional_args[0]) ]
elif s.sy == '**':
s.next()
starstar_arg = p_test(s)
if s.sy == ',':
s.next()
s.expect(')')
return positional_args, keyword_args, star_arg, starstar_arg
def p_call_build_packed_args(pos, positional_args, keyword_args,
star_arg, starstar_arg):
arg_tuple = None
keyword_dict = None
if positional_args or not star_arg:
arg_tuple = ExprNodes.TupleNode(pos,
args = positional_args)
if star_arg:
star_arg_tuple = ExprNodes.AsTupleNode(pos, arg = star_arg)
if arg_tuple:
arg_tuple = ExprNodes.binop_node(pos,
operator = '+', operand1 = arg_tuple,
operand2 = star_arg_tuple)
else:
arg_tuple = star_arg_tuple
if keyword_args or starstar_arg:
keyword_args = [ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)
for key, value in keyword_args]
if starstar_arg:
keyword_dict = ExprNodes.KeywordArgsNode(
pos,
starstar_arg = starstar_arg,
keyword_args = keyword_args)
else:
keyword_dict = ExprNodes.DictNode(
pos, key_value_pairs = keyword_args)
return arg_tuple, keyword_dict
def p_call(s, function):
# s.sy == '('
pos = s.position()
positional_args, keyword_args, star_arg, starstar_arg = \
p_call_parse_args(s)
if not (keyword_args or star_arg or starstar_arg):
return ExprNodes.SimpleCallNode(pos,
function = function,
args = positional_args)
else:
arg_tuple, keyword_dict = p_call_build_packed_args(
pos, positional_args, keyword_args, star_arg, starstar_arg)
return ExprNodes.GeneralCallNode(pos,
function = function,
positional_args = arg_tuple,
keyword_args = keyword_dict)
#lambdef: 'lambda' [varargslist] ':' test
#subscriptlist: subscript (',' subscript)* [',']
def p_index(s, base):
# s.sy == '['
pos = s.position()
s.next()
subscripts, is_single_value = p_subscript_list(s)
if is_single_value and len(subscripts[0]) == 2:
start, stop = subscripts[0]
result = ExprNodes.SliceIndexNode(pos,
base = base, start = start, stop = stop)
else:
indexes = make_slice_nodes(pos, subscripts)
if is_single_value:
index = indexes[0]
else:
index = ExprNodes.TupleNode(pos, args = indexes)
result = ExprNodes.IndexNode(pos,
base = base, index = index)
s.expect(']')
return result
def p_subscript_list(s):
is_single_value = True
items = [p_subscript(s)]
while s.sy == ',':
is_single_value = False
s.next()
if s.sy == ']':
break
items.append(p_subscript(s))
return items, is_single_value
#subscript: '.' '.' '.' | test | [test] ':' [test] [':' [test]]
def p_subscript(s):
# Parse a subscript and return a list of
# 1, 2 or 3 ExprNodes, depending on how
# many slice elements were encountered.
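    # Illustrative only: 'x[i]' yields [i]; 'x[a:b]' yields [a, b];
    # 'x[a:b:c]' yields [a, b, c]. Omitted elements come back as None via
    # p_slice_element, e.g. 'x[:b]' yields [None, b].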
pos = s.position()
start = p_slice_element(s, (':',))
if s.sy != ':':
return [start]
s.next()
stop = p_slice_element(s, (':', ',', ']'))
if s.sy != ':':
return [start, stop]
s.next()
step = p_slice_element(s, (':', ',', ']'))
return [start, stop, step]
def p_slice_element(s, follow_set):
# Simple expression which may be missing iff
# it is followed by something in follow_set.
if s.sy not in follow_set:
return p_test(s)
else:
return None
def expect_ellipsis(s):
s.expect('.')
s.expect('.')
s.expect('.')
def make_slice_nodes(pos, subscripts):
# Convert a list of subscripts as returned
# by p_subscript_list into a list of ExprNodes,
# creating SliceNodes for elements with 2 or
# more components.
result = []
for subscript in subscripts:
if len(subscript) == 1:
result.append(subscript[0])
else:
result.append(make_slice_node(pos, *subscript))
return result
def make_slice_node(pos, start, stop = None, step = None):
if not start:
start = ExprNodes.NoneNode(pos)
if not stop:
stop = ExprNodes.NoneNode(pos)
if not step:
step = ExprNodes.NoneNode(pos)
return ExprNodes.SliceNode(pos,
start = start, stop = stop, step = step)
#atom: '(' [yield_expr|testlist_comp] ')' | '[' [listmaker] ']' | '{' [dict_or_set_maker] '}' | '`' testlist '`' | NAME | NUMBER | STRING+
def p_atom(s):
pos = s.position()
sy = s.sy
if sy == '(':
s.next()
if s.sy == ')':
result = ExprNodes.TupleNode(pos, args = [])
elif s.sy == 'yield':
result = p_yield_expression(s)
else:
result = p_testlist_comp(s)
s.expect(')')
return result
elif sy == '[':
return p_list_maker(s)
elif sy == '{':
return p_dict_or_set_maker(s)
elif sy == '`':
return p_backquote_expr(s)
elif sy == '.':
expect_ellipsis(s)
return ExprNodes.EllipsisNode(pos)
elif sy == 'INT':
return p_int_literal(s)
elif sy == 'FLOAT':
value = s.systring
s.next()
return ExprNodes.FloatNode(pos, value = value)
elif sy == 'IMAG':
value = s.systring[:-1]
s.next()
return ExprNodes.ImagNode(pos, value = value)
elif sy == 'BEGIN_STRING':
kind, bytes_value, unicode_value = p_cat_string_literal(s)
if kind == 'c':
return ExprNodes.CharNode(pos, value = bytes_value)
elif kind == 'u':
return ExprNodes.UnicodeNode(pos, value = unicode_value, bytes_value = bytes_value)
elif kind == 'b':
return ExprNodes.BytesNode(pos, value = bytes_value)
else:
return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value)
elif sy == 'IDENT':
name = EncodedString( s.systring )
s.next()
if name == "None":
return ExprNodes.NoneNode(pos)
elif name == "True":
return ExprNodes.BoolNode(pos, value=True)
elif name == "False":
return ExprNodes.BoolNode(pos, value=False)
elif name == "NULL" and not s.in_python_file:
return ExprNodes.NullNode(pos)
else:
return p_name(s, name)
else:
s.error("Expected an identifier or literal")
def p_int_literal(s):
pos = s.position()
value = s.systring
s.next()
unsigned = ""
longness = ""
while value[-1] in u"UuLl":
if value[-1] in u"Ll":
longness += "L"
else:
unsigned += "U"
value = value[:-1]
# '3L' is ambiguous in Py2 but not in Py3. '3U' and '3LL' are
# illegal in Py2 Python files. All suffixes are illegal in Py3
# Python files.
is_c_literal = None
if unsigned:
is_c_literal = True
elif longness:
if longness == 'LL' or s.context.language_level >= 3:
is_c_literal = True
if s.in_python_file:
if is_c_literal:
error(pos, "illegal integer literal syntax in Python source file")
is_c_literal = False
return ExprNodes.IntNode(pos,
is_c_literal = is_c_literal,
value = value,
unsigned = unsigned,
longness = longness)
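# A minimal standalone sketch of the suffix stripping above, for illustration
# only (the helper name is made up and the parser does not use it):
#   "0x7FUL" -> ("0x7F", "U", "L");  "3" -> ("3", "", "")
def _split_int_suffix_sketch(text):
    unsigned = longness = ""
    while text and text[-1] in u"UuLl":
        if text[-1] in u"Ll":
            longness += "L"
        else:
            unsigned += "U"
        text = text[:-1]
    return text, unsigned, longness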
def p_name(s, name):
pos = s.position()
if not s.compile_time_expr and name in s.compile_time_env:
value = s.compile_time_env.lookup_here(name)
node = wrap_compile_time_constant(pos, value)
if node is not None:
return node
return ExprNodes.NameNode(pos, name=name)
def wrap_compile_time_constant(pos, value):
rep = repr(value)
if value is None:
return ExprNodes.NoneNode(pos)
elif value is Ellipsis:
return ExprNodes.EllipsisNode(pos)
elif isinstance(value, bool):
return ExprNodes.BoolNode(pos, value=value)
elif isinstance(value, int):
return ExprNodes.IntNode(pos, value=rep)
elif isinstance(value, long):
return ExprNodes.IntNode(pos, value=rep, longness="L")
elif isinstance(value, float):
return ExprNodes.FloatNode(pos, value=rep)
elif isinstance(value, _unicode):
return ExprNodes.UnicodeNode(pos, value=EncodedString(value))
elif isinstance(value, _bytes):
return ExprNodes.BytesNode(pos, value=BytesLiteral(value))
elif isinstance(value, tuple):
args = [wrap_compile_time_constant(pos, arg)
for arg in value]
if None not in args:
return ExprNodes.TupleNode(pos, args=args)
else:
# error already reported
return None
error(pos, "Invalid type for compile-time constant: %r (type %s)"
% (value, value.__class__.__name__))
return None
def p_cat_string_literal(s):
# A sequence of one or more adjacent string literals.
# Returns (kind, bytes_value, unicode_value)
# where kind in ('b', 'c', 'u', '')
kind, bytes_value, unicode_value = p_string_literal(s)
if kind == 'c' or s.sy != 'BEGIN_STRING':
return kind, bytes_value, unicode_value
bstrings, ustrings = [bytes_value], [unicode_value]
bytes_value = unicode_value = None
while s.sy == 'BEGIN_STRING':
pos = s.position()
next_kind, next_bytes_value, next_unicode_value = p_string_literal(s)
if next_kind == 'c':
error(pos, "Cannot concatenate char literal with another string or char literal")
elif next_kind != kind:
error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" %
(kind, next_kind))
else:
bstrings.append(next_bytes_value)
ustrings.append(next_unicode_value)
# join and rewrap the partial literals
if kind in ('b', 'c', '') or kind == 'u' and None not in bstrings:
# Py3 enforced unicode literals are parsed as bytes/unicode combination
bytes_value = BytesLiteral( StringEncoding.join_bytes(bstrings) )
bytes_value.encoding = s.source_encoding
if kind in ('u', ''):
unicode_value = EncodedString( u''.join([ u for u in ustrings if u is not None ]) )
return kind, bytes_value, unicode_value
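# Illustrative adjacent-literal handling above:
#   "abc" "def"   -> a single string literal 'abcdef'
#   u"abc" u"def" -> a single unicode literal u'abcdef'
#   b"abc" u"def" -> error: cannot mix string literals of different types
#   "yz" c'x'     -> error: char literals cannot be concatenated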
def p_opt_string_literal(s, required_type='u'):
if s.sy == 'BEGIN_STRING':
kind, bytes_value, unicode_value = p_string_literal(s, required_type)
if required_type == 'u':
return unicode_value
elif required_type == 'b':
return bytes_value
else:
s.error("internal parser configuration error")
else:
return None
def check_for_non_ascii_characters(string):
for c in string:
if c >= u'\x80':
return True
return False
def p_string_literal(s, kind_override=None):
# A single string or char literal. Returns (kind, bvalue, uvalue)
# where kind in ('b', 'c', 'u', ''). The 'bvalue' is the source
# code byte sequence of the string literal, 'uvalue' is the
# decoded Unicode string. Either of the two may be None depending
# on the 'kind' of string, only unprefixed strings have both
# representations.
# s.sy == 'BEGIN_STRING'
pos = s.position()
is_raw = False
is_python3_source = s.context.language_level >= 3
has_non_ASCII_literal_characters = False
kind = s.systring[:1].lower()
if kind == 'r':
# Py3 allows both 'br' and 'rb' as prefix
if s.systring[1:2].lower() == 'b':
kind = 'b'
else:
kind = ''
is_raw = True
elif kind in 'ub':
is_raw = s.systring[1:2].lower() == 'r'
elif kind != 'c':
kind = ''
if kind == '' and kind_override is None and Future.unicode_literals in s.context.future_directives:
chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
kind = 'u'
else:
if kind_override is not None and kind_override in 'ub':
kind = kind_override
if kind == 'u':
chars = StringEncoding.UnicodeLiteralBuilder()
elif kind == '':
chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
else:
chars = StringEncoding.BytesLiteralBuilder(s.source_encoding)
while 1:
s.next()
sy = s.sy
systr = s.systring
#print "p_string_literal: sy =", sy, repr(s.systring) ###
if sy == 'CHARS':
chars.append(systr)
if is_python3_source and not has_non_ASCII_literal_characters and check_for_non_ascii_characters(systr):
has_non_ASCII_literal_characters = True
elif sy == 'ESCAPE':
if is_raw:
chars.append(systr)
if is_python3_source and not has_non_ASCII_literal_characters \
and check_for_non_ascii_characters(systr):
has_non_ASCII_literal_characters = True
else:
c = systr[1]
if c in u"01234567":
chars.append_charval( int(systr[1:], 8) )
elif c in u"'\"\\":
chars.append(c)
elif c in u"abfnrtv":
chars.append(
StringEncoding.char_from_escape_sequence(systr))
elif c == u'\n':
pass
elif c == u'x': # \xXX
if len(systr) == 4:
chars.append_charval( int(systr[2:], 16) )
else:
s.error("Invalid hex escape '%s'" % systr,
fatal=False)
elif c in u'NUu' and kind in ('u', ''): # \uxxxx, \Uxxxxxxxx, \N{...}
chrval = -1
if c == u'N':
try:
chrval = ord(lookup_unicodechar(systr[3:-1]))
except KeyError:
s.error("Unknown Unicode character name %s" %
repr(systr[3:-1]).lstrip('u'))
elif len(systr) in (6,10):
chrval = int(systr[2:], 16)
if chrval > 1114111: # sys.maxunicode:
s.error("Invalid unicode escape '%s'" % systr)
chrval = -1
else:
s.error("Invalid unicode escape '%s'" % systr,
fatal=False)
if chrval >= 0:
chars.append_uescape(chrval, systr)
else:
chars.append(u'\\' + systr[1:])
if is_python3_source and not has_non_ASCII_literal_characters \
and check_for_non_ascii_characters(systr):
has_non_ASCII_literal_characters = True
elif sy == 'NEWLINE':
chars.append(u'\n')
elif sy == 'END_STRING':
break
elif sy == 'EOF':
s.error("Unclosed string literal", pos=pos)
else:
s.error("Unexpected token %r:%r in string literal" %
(sy, s.systring))
if kind == 'c':
unicode_value = None
bytes_value = chars.getchar()
if len(bytes_value) != 1:
error(pos, u"invalid character literal: %r" % bytes_value)
else:
bytes_value, unicode_value = chars.getstrings()
if is_python3_source and has_non_ASCII_literal_characters:
# Python 3 forbids literal non-ASCII characters in byte strings
if kind != 'u':
s.error("bytes can only contain ASCII literal characters.",
pos=pos, fatal=False)
bytes_value = None
s.next()
return (kind, bytes_value, unicode_value)
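# Illustrative mapping of literal prefixes to the 'kind' values used above:
#   "..."          -> ''  (plain str; becomes 'u' under "from __future__ import unicode_literals")
#   b"..."         -> 'b' (bytes)
#   u"..."         -> 'u' (unicode)
#   c'x'           -> 'c' (Cython char literal, must be exactly one character)
#   r"...", br"...", rb"..." -> raw variants of '' and 'b'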
# list_display ::= "[" [listmaker] "]"
# listmaker ::= expression ( comp_for | ( "," expression )* [","] )
# comp_iter ::= comp_for | comp_if
# comp_for ::= "for" expression_list "in" testlist [comp_iter]
# comp_if ::= "if" test [comp_iter]
def p_list_maker(s):
# s.sy == '['
pos = s.position()
s.next()
if s.sy == ']':
s.expect(']')
return ExprNodes.ListNode(pos, args = [])
expr = p_test(s)
if s.sy == 'for':
append = ExprNodes.ComprehensionAppendNode(pos, expr=expr)
loop = p_comp_for(s, append)
s.expect(']')
return ExprNodes.ComprehensionNode(
pos, loop=loop, append=append, type = Builtin.list_type,
# list comprehensions leak their loop variable in Py2
has_local_scope = s.context.language_level >= 3)
else:
if s.sy == ',':
s.next()
exprs = p_simple_expr_list(s, expr)
else:
exprs = [expr]
s.expect(']')
return ExprNodes.ListNode(pos, args = exprs)
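# Illustrative '[...]' forms handled above:
#   []                       -> empty ListNode
#   [1, 2, 3]                -> ListNode
#   [x*x for x in seq if x]  -> ComprehensionNode (gets its own scope only for
#                               language_level >= 3; in Py2 the loop variable leaks)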
def p_comp_iter(s, body):
if s.sy == 'for':
return p_comp_for(s, body)
elif s.sy == 'if':
return p_comp_if(s, body)
else:
# insert the 'append' operation into the loop
return body
def p_comp_for(s, body):
# s.sy == 'for'
pos = s.position()
s.next()
kw = p_for_bounds(s, allow_testlist=False)
kw.update(else_clause = None, body = p_comp_iter(s, body))
return Nodes.ForStatNode(pos, **kw)
def p_comp_if(s, body):
# s.sy == 'if'
pos = s.position()
s.next()
test = p_test_nocond(s)
return Nodes.IfStatNode(pos,
if_clauses = [Nodes.IfClauseNode(pos, condition = test,
body = p_comp_iter(s, body))],
else_clause = None )
#dictmaker: test ':' test (',' test ':' test)* [',']
def p_dict_or_set_maker(s):
# s.sy == '{'
pos = s.position()
s.next()
if s.sy == '}':
s.next()
return ExprNodes.DictNode(pos, key_value_pairs = [])
item = p_test(s)
if s.sy == ',' or s.sy == '}':
# set literal
values = [item]
while s.sy == ',':
s.next()
if s.sy == '}':
break
values.append( p_test(s) )
s.expect('}')
return ExprNodes.SetNode(pos, args=values)
elif s.sy == 'for':
# set comprehension
append = ExprNodes.ComprehensionAppendNode(
item.pos, expr=item)
loop = p_comp_for(s, append)
s.expect('}')
return ExprNodes.ComprehensionNode(
pos, loop=loop, append=append, type=Builtin.set_type)
elif s.sy == ':':
# dict literal or comprehension
key = item
s.next()
value = p_test(s)
if s.sy == 'for':
# dict comprehension
append = ExprNodes.DictComprehensionAppendNode(
item.pos, key_expr=key, value_expr=value)
loop = p_comp_for(s, append)
s.expect('}')
return ExprNodes.ComprehensionNode(
pos, loop=loop, append=append, type=Builtin.dict_type)
else:
# dict literal
items = [ExprNodes.DictItemNode(key.pos, key=key, value=value)]
while s.sy == ',':
s.next()
if s.sy == '}':
break
key = p_test(s)
s.expect(':')
value = p_test(s)
items.append(
ExprNodes.DictItemNode(key.pos, key=key, value=value))
s.expect('}')
return ExprNodes.DictNode(pos, key_value_pairs=items)
else:
# raise an error
s.expect('}')
return ExprNodes.DictNode(pos, key_value_pairs = [])
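# Illustrative '{...}' forms distinguished above:
#   {}                        -> empty DictNode
#   {1, 2, 3}                 -> SetNode
#   {x for x in seq}          -> set comprehension
#   {'a': 1, 'b': 2}          -> DictNode
#   {k: v for k, v in pairs}  -> dict comprehension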
# NOTE: backquote (repr) expressions no longer exist in Py3 :)
def p_backquote_expr(s):
# s.sy == '`'
pos = s.position()
s.next()
args = [p_test(s)]
while s.sy == ',':
s.next()
args.append(p_test(s))
s.expect('`')
if len(args) == 1:
arg = args[0]
else:
arg = ExprNodes.TupleNode(pos, args = args)
return ExprNodes.BackquoteNode(pos, arg = arg)
def p_simple_expr_list(s, expr=None):
exprs = expr is not None and [expr] or []
while s.sy not in expr_terminators:
exprs.append( p_test(s) )
if s.sy != ',':
break
s.next()
return exprs
def p_test_or_starred_expr_list(s, expr=None):
exprs = expr is not None and [expr] or []
while s.sy not in expr_terminators:
exprs.append( p_test_or_starred_expr(s) )
if s.sy != ',':
break
s.next()
return exprs
#testlist: test (',' test)* [',']
def p_testlist(s):
pos = s.position()
expr = p_test(s)
if s.sy == ',':
s.next()
exprs = p_simple_expr_list(s, expr)
return ExprNodes.TupleNode(pos, args = exprs)
else:
return expr
# testlist_star_expr: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
def p_testlist_star_expr(s):
pos = s.position()
expr = p_test_or_starred_expr(s)
if s.sy == ',':
s.next()
exprs = p_test_or_starred_expr_list(s, expr)
return ExprNodes.TupleNode(pos, args = exprs)
else:
return expr
# testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
def p_testlist_comp(s):
pos = s.position()
expr = p_test_or_starred_expr(s)
if s.sy == ',':
s.next()
exprs = p_test_or_starred_expr_list(s, expr)
return ExprNodes.TupleNode(pos, args = exprs)
elif s.sy == 'for':
return p_genexp(s, expr)
else:
return expr
def p_genexp(s, expr):
# s.sy == 'for'
loop = p_comp_for(s, Nodes.ExprStatNode(
expr.pos, expr = ExprNodes.YieldExprNode(expr.pos, arg=expr)))
return ExprNodes.GeneratorExpressionNode(expr.pos, loop=loop)
expr_terminators = cython.declare(set, set([
')', ']', '}', ':', '=', 'NEWLINE']))
#-------------------------------------------------------
#
# Statements
#
#-------------------------------------------------------
def p_global_statement(s):
# assume s.sy == 'global'
pos = s.position()
s.next()
names = p_ident_list(s)
return Nodes.GlobalNode(pos, names = names)
def p_nonlocal_statement(s):
pos = s.position()
s.next()
names = p_ident_list(s)
return Nodes.NonlocalNode(pos, names = names)
def p_expression_or_assignment(s):
expr_list = [p_testlist_star_expr(s)]
if s.sy == '=' and expr_list[0].is_starred:
# This is a common enough error to make when learning Cython to let
# it fail as early as possible and give a very clear error message.
s.error("a starred assignment target must be in a list or tuple"
" - maybe you meant to use an index assignment: var[0] = ...",
pos=expr_list[0].pos)
while s.sy == '=':
s.next()
if s.sy == 'yield':
expr = p_yield_expression(s)
else:
expr = p_testlist_star_expr(s)
expr_list.append(expr)
if len(expr_list) == 1:
if re.match(r"([+*/\%^\&|-]|<<|>>|\*\*|//)=", s.sy):
lhs = expr_list[0]
if isinstance(lhs, ExprNodes.SliceIndexNode):
# implementation requires IndexNode
lhs = ExprNodes.IndexNode(
lhs.pos,
base=lhs.base,
index=make_slice_node(lhs.pos, lhs.start, lhs.stop))
elif not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode) ):
error(lhs.pos, "Illegal operand for inplace operation.")
operator = s.sy[:-1]
s.next()
if s.sy == 'yield':
rhs = p_yield_expression(s)
else:
rhs = p_testlist(s)
return Nodes.InPlaceAssignmentNode(lhs.pos, operator = operator, lhs = lhs, rhs = rhs)
expr = expr_list[0]
return Nodes.ExprStatNode(expr.pos, expr=expr)
rhs = expr_list[-1]
if len(expr_list) == 2:
return Nodes.SingleAssignmentNode(rhs.pos,
lhs = expr_list[0], rhs = rhs)
else:
return Nodes.CascadedAssignmentNode(rhs.pos,
lhs_list = expr_list[:-1], rhs = rhs)
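# Illustrative assignment forms handled above:
#   x = 1        -> SingleAssignmentNode
#   x = y = 1    -> CascadedAssignmentNode
#   x += 1       -> InPlaceAssignmentNode with operator '+'
#   x[1:2] += y  -> the slice lhs is first rewritten into an IndexNode
#   *x = 1       -> error: a starred target must be in a list or tuple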
def p_print_statement(s):
# s.sy == 'print'
pos = s.position()
ends_with_comma = 0
s.next()
if s.sy == '>>':
s.next()
stream = p_test(s)
if s.sy == ',':
s.next()
ends_with_comma = s.sy in ('NEWLINE', 'EOF')
else:
stream = None
args = []
if s.sy not in ('NEWLINE', 'EOF'):
args.append(p_test(s))
while s.sy == ',':
s.next()
if s.sy in ('NEWLINE', 'EOF'):
ends_with_comma = 1
break
args.append(p_test(s))
arg_tuple = ExprNodes.TupleNode(pos, args = args)
return Nodes.PrintStatNode(pos,
arg_tuple = arg_tuple, stream = stream,
append_newline = not ends_with_comma)
def p_exec_statement(s):
# s.sy == 'exec'
pos = s.position()
s.next()
code = p_bit_expr(s)
if isinstance(code, ExprNodes.TupleNode):
# Py3 compatibility syntax
tuple_variant = True
args = code.args
if len(args) not in (2, 3):
s.error("expected tuple of length 2 or 3, got length %d" % len(args),
pos=pos, fatal=False)
args = [code]
else:
tuple_variant = False
args = [code]
if s.sy == 'in':
if tuple_variant:
s.error("tuple variant of exec does not support additional 'in' arguments",
fatal=False)
s.next()
args.append(p_test(s))
if s.sy == ',':
s.next()
args.append(p_test(s))
return Nodes.ExecStatNode(pos, args=args)
def p_del_statement(s):
# s.sy == 'del'
pos = s.position()
s.next()
# FIXME: 'exprlist' in Python
args = p_simple_expr_list(s)
return Nodes.DelStatNode(pos, args = args)
def p_pass_statement(s, with_newline = 0):
pos = s.position()
s.expect('pass')
if with_newline:
s.expect_newline("Expected a newline")
return Nodes.PassStatNode(pos)
def p_break_statement(s):
# s.sy == 'break'
pos = s.position()
s.next()
return Nodes.BreakStatNode(pos)
def p_continue_statement(s):
# s.sy == 'continue'
pos = s.position()
s.next()
return Nodes.ContinueStatNode(pos)
def p_return_statement(s):
# s.sy == 'return'
pos = s.position()
s.next()
if s.sy not in statement_terminators:
value = p_testlist(s)
else:
value = None
return Nodes.ReturnStatNode(pos, value = value)
def p_raise_statement(s):
# s.sy == 'raise'
pos = s.position()
s.next()
exc_type = None
exc_value = None
exc_tb = None
cause = None
if s.sy not in statement_terminators:
exc_type = p_test(s)
if s.sy == ',':
s.next()
exc_value = p_test(s)
if s.sy == ',':
s.next()
exc_tb = p_test(s)
elif s.sy == 'from':
s.next()
cause = p_test(s)
if exc_type or exc_value or exc_tb:
return Nodes.RaiseStatNode(pos,
exc_type = exc_type,
exc_value = exc_value,
exc_tb = exc_tb,
cause = cause)
else:
return Nodes.ReraiseStatNode(pos)
def p_import_statement(s):
# s.sy in ('import', 'cimport')
pos = s.position()
kind = s.sy
s.next()
items = [p_dotted_name(s, as_allowed = 1)]
while s.sy == ',':
s.next()
items.append(p_dotted_name(s, as_allowed = 1))
stats = []
for pos, target_name, dotted_name, as_name in items:
dotted_name = EncodedString(dotted_name)
if kind == 'cimport':
stat = Nodes.CImportStatNode(pos,
module_name = dotted_name,
as_name = as_name)
else:
if as_name and "." in dotted_name:
name_list = ExprNodes.ListNode(pos, args = [
ExprNodes.IdentifierStringNode(pos, value = EncodedString("*"))])
else:
name_list = None
stat = Nodes.SingleAssignmentNode(pos,
lhs = ExprNodes.NameNode(pos,
name = as_name or target_name),
rhs = ExprNodes.ImportNode(pos,
module_name = ExprNodes.IdentifierStringNode(
pos, value = dotted_name),
level = None,
name_list = name_list))
stats.append(stat)
return Nodes.StatListNode(pos, stats = stats)
def p_from_import_statement(s, first_statement = 0):
# s.sy == 'from'
pos = s.position()
s.next()
if s.sy == '.':
# count relative import level
level = 0
while s.sy == '.':
level += 1
s.next()
if s.sy == 'cimport':
s.error("Relative cimport is not supported yet")
else:
level = None
if level is not None and s.sy == 'import':
# we are dealing with "from .. import foo, bar"
dotted_name_pos, dotted_name = s.position(), ''
elif level is not None and s.sy == 'cimport':
# "from .. cimport"
s.error("Relative cimport is not supported yet")
else:
(dotted_name_pos, _, dotted_name, _) = \
p_dotted_name(s, as_allowed = 0)
if s.sy in ('import', 'cimport'):
kind = s.sy
s.next()
else:
s.error("Expected 'import' or 'cimport'")
is_cimport = kind == 'cimport'
is_parenthesized = False
if s.sy == '*':
imported_names = [(s.position(), "*", None, None)]
s.next()
else:
if s.sy == '(':
is_parenthesized = True
s.next()
imported_names = [p_imported_name(s, is_cimport)]
while s.sy == ',':
s.next()
if is_parenthesized and s.sy == ')':
break
imported_names.append(p_imported_name(s, is_cimport))
if is_parenthesized:
s.expect(')')
dotted_name = EncodedString(dotted_name)
if dotted_name == '__future__':
if not first_statement:
s.error("from __future__ imports must occur at the beginning of the file")
elif level is not None:
s.error("invalid syntax")
else:
for (name_pos, name, as_name, kind) in imported_names:
if name == "braces":
s.error("not a chance", name_pos)
break
try:
directive = getattr(Future, name)
except AttributeError:
s.error("future feature %s is not defined" % name, name_pos)
break
s.context.future_directives.add(directive)
return Nodes.PassStatNode(pos)
elif kind == 'cimport':
return Nodes.FromCImportStatNode(pos,
module_name = dotted_name,
imported_names = imported_names)
else:
imported_name_strings = []
items = []
for (name_pos, name, as_name, kind) in imported_names:
encoded_name = EncodedString(name)
imported_name_strings.append(
ExprNodes.IdentifierStringNode(name_pos, value = encoded_name))
items.append(
(name,
ExprNodes.NameNode(name_pos,
name = as_name or name)))
import_list = ExprNodes.ListNode(
imported_names[0][0], args = imported_name_strings)
dotted_name = EncodedString(dotted_name)
return Nodes.FromImportStatNode(pos,
module = ExprNodes.ImportNode(dotted_name_pos,
module_name = ExprNodes.IdentifierStringNode(pos, value = dotted_name),
level = level,
name_list = import_list),
items = items)
imported_name_kinds = cython.declare(
set, set(['class', 'struct', 'union']))
def p_imported_name(s, is_cimport):
pos = s.position()
kind = None
if is_cimport and s.systring in imported_name_kinds:
kind = s.systring
s.next()
name = p_ident(s)
as_name = p_as_name(s)
return (pos, name, as_name, kind)
def p_dotted_name(s, as_allowed):
pos = s.position()
target_name = p_ident(s)
as_name = None
names = [target_name]
while s.sy == '.':
s.next()
names.append(p_ident(s))
if as_allowed:
as_name = p_as_name(s)
return (pos, target_name, u'.'.join(names), as_name)
def p_as_name(s):
if s.sy == 'IDENT' and s.systring == 'as':
s.next()
return p_ident(s)
else:
return None
def p_assert_statement(s):
# s.sy == 'assert'
pos = s.position()
s.next()
cond = p_test(s)
if s.sy == ',':
s.next()
value = p_test(s)
else:
value = None
return Nodes.AssertStatNode(pos, cond = cond, value = value)
statement_terminators = cython.declare(set, set([';', 'NEWLINE', 'EOF']))
def p_if_statement(s):
# s.sy == 'if'
pos = s.position()
s.next()
if_clauses = [p_if_clause(s)]
while s.sy == 'elif':
s.next()
if_clauses.append(p_if_clause(s))
else_clause = p_else_clause(s)
return Nodes.IfStatNode(pos,
if_clauses = if_clauses, else_clause = else_clause)
def p_if_clause(s):
pos = s.position()
test = p_test(s)
body = p_suite(s)
return Nodes.IfClauseNode(pos,
condition = test, body = body)
def p_else_clause(s):
if s.sy == 'else':
s.next()
return p_suite(s)
else:
return None
def p_while_statement(s):
# s.sy == 'while'
pos = s.position()
s.next()
test = p_test(s)
body = p_suite(s)
else_clause = p_else_clause(s)
return Nodes.WhileStatNode(pos,
condition = test, body = body,
else_clause = else_clause)
def p_for_statement(s):
# s.sy == 'for'
pos = s.position()
s.next()
kw = p_for_bounds(s, allow_testlist=True)
body = p_suite(s)
else_clause = p_else_clause(s)
kw.update(body = body, else_clause = else_clause)
return Nodes.ForStatNode(pos, **kw)
def p_for_bounds(s, allow_testlist=True):
target = p_for_target(s)
if s.sy == 'in':
s.next()
iterator = p_for_iterator(s, allow_testlist)
return dict( target = target, iterator = iterator )
elif not s.in_python_file:
if s.sy == 'from':
s.next()
bound1 = p_bit_expr(s)
else:
# Support shorter "for a <= x < b" syntax
bound1, target = target, None
rel1 = p_for_from_relation(s)
name2_pos = s.position()
name2 = p_ident(s)
rel2_pos = s.position()
rel2 = p_for_from_relation(s)
bound2 = p_bit_expr(s)
step = p_for_from_step(s)
if target is None:
target = ExprNodes.NameNode(name2_pos, name = name2)
else:
if not target.is_name:
error(target.pos,
"Target of for-from statement must be a variable name")
elif name2 != target.name:
error(name2_pos,
"Variable name in for-from range does not match target")
if rel1[0] != rel2[0]:
error(rel2_pos,
"Relation directions in for-from do not match")
return dict(target = target,
bound1 = bound1,
relation1 = rel1,
relation2 = rel2,
bound2 = bound2,
step = step,
)
else:
s.expect('in')
return {}
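# Illustrative loop headers handled above; the last two are the Cython-only
# integer "for-from" forms, which are rejected in pure Python files:
#   for x in seq: ...
#   for 0 <= i < n: ...
#   for i from 0 <= i < n by 2: ...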
def p_for_from_relation(s):
if s.sy in inequality_relations:
op = s.sy
s.next()
return op
else:
s.error("Expected one of '<', '<=', '>' '>='")
def p_for_from_step(s):
if s.sy == 'IDENT' and s.systring == 'by':
s.next()
step = p_bit_expr(s)
return step
else:
return None
inequality_relations = cython.declare(set, set(['<', '<=', '>', '>=']))
def p_target(s, terminator):
pos = s.position()
expr = p_starred_expr(s)
if s.sy == ',':
s.next()
exprs = [expr]
while s.sy != terminator:
exprs.append(p_starred_expr(s))
if s.sy != ',':
break
s.next()
return ExprNodes.TupleNode(pos, args = exprs)
else:
return expr
def p_for_target(s):
return p_target(s, 'in')
def p_for_iterator(s, allow_testlist=True):
pos = s.position()
if allow_testlist:
expr = p_testlist(s)
else:
expr = p_or_test(s)
return ExprNodes.IteratorNode(pos, sequence = expr)
def p_try_statement(s):
# s.sy == 'try'
pos = s.position()
s.next()
body = p_suite(s)
except_clauses = []
else_clause = None
if s.sy in ('except', 'else'):
while s.sy == 'except':
except_clauses.append(p_except_clause(s))
if s.sy == 'else':
s.next()
else_clause = p_suite(s)
body = Nodes.TryExceptStatNode(pos,
body = body, except_clauses = except_clauses,
else_clause = else_clause)
if s.sy != 'finally':
return body
# try-except-finally is equivalent to nested try-except/try-finally
if s.sy == 'finally':
s.next()
finally_clause = p_suite(s)
return Nodes.TryFinallyStatNode(pos,
body = body, finally_clause = finally_clause)
else:
s.error("Expected 'except' or 'finally'")
def p_except_clause(s):
# s.sy == 'except'
pos = s.position()
s.next()
exc_type = None
exc_value = None
is_except_as = False
if s.sy != ':':
exc_type = p_test(s)
# normalise into list of single exception tests
if isinstance(exc_type, ExprNodes.TupleNode):
exc_type = exc_type.args
else:
exc_type = [exc_type]
if s.sy == ',' or (s.sy == 'IDENT' and s.systring == 'as'
and s.context.language_level == 2):
s.next()
exc_value = p_test(s)
elif s.sy == 'IDENT' and s.systring == 'as':
# Py3 syntax requires a name here
s.next()
pos2 = s.position()
name = p_ident(s)
exc_value = ExprNodes.NameNode(pos2, name = name)
is_except_as = True
body = p_suite(s)
return Nodes.ExceptClauseNode(pos,
pattern = exc_type, target = exc_value,
body = body, is_except_as=is_except_as)
def p_include_statement(s, ctx):
pos = s.position()
s.next() # 'include'
unicode_include_file_name = p_string_literal(s, 'u')[2]
s.expect_newline("Syntax error in include statement")
if s.compile_time_eval:
include_file_name = unicode_include_file_name
include_file_path = s.context.find_include_file(include_file_name, pos)
if include_file_path:
s.included_files.append(include_file_name)
f = Utils.open_source_file(include_file_path, mode="rU")
source_desc = FileSourceDescriptor(include_file_path)
s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
try:
tree = p_statement_list(s2, ctx)
finally:
f.close()
return tree
else:
return None
else:
return Nodes.PassStatNode(pos)
def p_with_statement(s):
s.next() # 'with'
if s.systring == 'template' and not s.in_python_file:
node = p_with_template(s)
else:
node = p_with_items(s)
return node
def p_with_items(s):
pos = s.position()
if not s.in_python_file and s.sy == 'IDENT' and s.systring in ('nogil', 'gil'):
state = s.systring
s.next()
if s.sy == ',':
s.next()
body = p_with_items(s)
else:
body = p_suite(s)
return Nodes.GILStatNode(pos, state = state, body = body)
else:
manager = p_test(s)
target = None
if s.sy == 'IDENT' and s.systring == 'as':
s.next()
target = p_starred_expr(s)
if s.sy == ',':
s.next()
body = p_with_items(s)
else:
body = p_suite(s)
return Nodes.WithStatNode(pos, manager = manager,
target = target, body = body)
def p_with_template(s):
pos = s.position()
templates = []
s.next()
s.expect('[')
templates.append(s.systring)
s.next()
while s.systring == ',':
s.next()
templates.append(s.systring)
s.next()
s.expect(']')
if s.sy == ':':
s.next()
s.expect_newline("Syntax error in template function declaration")
s.expect_indent()
body_ctx = Ctx()
body_ctx.templates = templates
func_or_var = p_c_func_or_var_declaration(s, pos, body_ctx)
s.expect_dedent()
return func_or_var
else:
error(pos, "Syntax error in template function declaration")
def p_simple_statement(s, first_statement = 0):
#print "p_simple_statement:", s.sy, s.systring ###
if s.sy == 'global':
node = p_global_statement(s)
elif s.sy == 'nonlocal':
node = p_nonlocal_statement(s)
elif s.sy == 'print':
node = p_print_statement(s)
elif s.sy == 'exec':
node = p_exec_statement(s)
elif s.sy == 'del':
node = p_del_statement(s)
elif s.sy == 'break':
node = p_break_statement(s)
elif s.sy == 'continue':
node = p_continue_statement(s)
elif s.sy == 'return':
node = p_return_statement(s)
elif s.sy == 'raise':
node = p_raise_statement(s)
elif s.sy in ('import', 'cimport'):
node = p_import_statement(s)
elif s.sy == 'from':
node = p_from_import_statement(s, first_statement = first_statement)
elif s.sy == 'yield':
node = p_yield_statement(s)
elif s.sy == 'assert':
node = p_assert_statement(s)
elif s.sy == 'pass':
node = p_pass_statement(s)
else:
node = p_expression_or_assignment(s)
return node
def p_simple_statement_list(s, ctx, first_statement = 0):
# Parse a series of simple statements on one line
# separated by semicolons.
stat = p_simple_statement(s, first_statement = first_statement)
pos = stat.pos
stats = []
if not isinstance(stat, Nodes.PassStatNode):
stats.append(stat)
while s.sy == ';':
#print "p_simple_statement_list: maybe more to follow" ###
s.next()
if s.sy in ('NEWLINE', 'EOF'):
break
stat = p_simple_statement(s, first_statement = first_statement)
if isinstance(stat, Nodes.PassStatNode):
continue
stats.append(stat)
first_statement = False
if not stats:
stat = Nodes.PassStatNode(pos)
elif len(stats) == 1:
stat = stats[0]
else:
stat = Nodes.StatListNode(pos, stats = stats)
s.expect_newline("Syntax error in simple statement list")
return stat
def p_compile_time_expr(s):
old = s.compile_time_expr
s.compile_time_expr = 1
expr = p_testlist(s)
s.compile_time_expr = old
return expr
def p_DEF_statement(s):
pos = s.position()
denv = s.compile_time_env
s.next() # 'DEF'
name = p_ident(s)
s.expect('=')
expr = p_compile_time_expr(s)
value = expr.compile_time_value(denv)
#print "p_DEF_statement: %s = %r" % (name, value) ###
denv.declare(name, value)
s.expect_newline()
return Nodes.PassStatNode(pos)
def p_IF_statement(s, ctx):
pos = s.position()
saved_eval = s.compile_time_eval
current_eval = saved_eval
denv = s.compile_time_env
result = None
while 1:
s.next() # 'IF' or 'ELIF'
expr = p_compile_time_expr(s)
s.compile_time_eval = current_eval and bool(expr.compile_time_value(denv))
body = p_suite(s, ctx)
if s.compile_time_eval:
result = body
current_eval = 0
if s.sy != 'ELIF':
break
if s.sy == 'ELSE':
s.next()
s.compile_time_eval = current_eval
body = p_suite(s, ctx)
if current_eval:
result = body
if not result:
result = Nodes.PassStatNode(pos)
s.compile_time_eval = saved_eval
return result
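# Illustrative compile-time DEF/IF usage handled by the two functions above;
# only the branch selected at compile time is kept in the tree (names made up):
#   DEF USE_FAST_PATH = True
#   IF USE_FAST_PATH:
#       cdef int mode = 1
#   ELSE:
#       cdef int mode = 0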
def p_statement(s, ctx, first_statement = 0):
cdef_flag = ctx.cdef_flag
decorators = None
if s.sy == 'ctypedef':
if ctx.level not in ('module', 'module_pxd'):
s.error("ctypedef statement not allowed here")
#if ctx.api:
# error(s.position(), "'api' not allowed with 'ctypedef'")
return p_ctypedef_statement(s, ctx)
elif s.sy == 'DEF':
return p_DEF_statement(s)
elif s.sy == 'IF':
return p_IF_statement(s, ctx)
elif s.sy == 'DECORATOR':
if ctx.level not in ('module', 'class', 'c_class', 'function', 'property', 'module_pxd', 'c_class_pxd', 'other'):
s.error('decorator not allowed here')
s.level = ctx.level
decorators = p_decorators(s)
bad_toks = 'def', 'cdef', 'cpdef', 'class'
if not ctx.allow_struct_enum_decorator and s.sy not in bad_toks:
s.error("Decorators can only be followed by functions or classes")
elif s.sy == 'pass' and cdef_flag:
# empty cdef block
return p_pass_statement(s, with_newline = 1)
overridable = 0
if s.sy == 'cdef':
cdef_flag = 1
s.next()
elif s.sy == 'cpdef':
cdef_flag = 1
overridable = 1
s.next()
if cdef_flag:
if ctx.level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'):
s.error('cdef statement not allowed here')
s.level = ctx.level
node = p_cdef_statement(s, ctx(overridable = overridable))
if decorators is not None:
tup = Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode
if ctx.allow_struct_enum_decorator:
tup += Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode
if not isinstance(node, tup):
s.error("Decorators can only be followed by functions or classes")
node.decorators = decorators
return node
else:
if ctx.api:
s.error("'api' not allowed with this statement", fatal=False)
elif s.sy == 'def':
# def statements aren't allowed in pxd files, except
# as part of a cdef class
if ('pxd' in ctx.level) and (ctx.level != 'c_class_pxd'):
s.error('def statement not allowed here')
s.level = ctx.level
return p_def_statement(s, decorators)
elif s.sy == 'class':
if ctx.level not in ('module', 'function', 'class', 'other'):
s.error("class definition not allowed here")
return p_class_statement(s, decorators)
elif s.sy == 'include':
if ctx.level not in ('module', 'module_pxd'):
s.error("include statement not allowed here")
return p_include_statement(s, ctx)
elif ctx.level == 'c_class' and s.sy == 'IDENT' and s.systring == 'property':
return p_property_decl(s)
elif s.sy == 'pass' and ctx.level != 'property':
return p_pass_statement(s, with_newline=True)
else:
if ctx.level in ('c_class_pxd', 'property'):
node = p_ignorable_statement(s)
if node is not None:
return node
s.error("Executable statement not allowed here")
if s.sy == 'if':
return p_if_statement(s)
elif s.sy == 'while':
return p_while_statement(s)
elif s.sy == 'for':
return p_for_statement(s)
elif s.sy == 'try':
return p_try_statement(s)
elif s.sy == 'with':
return p_with_statement(s)
else:
return p_simple_statement_list(
s, ctx, first_statement = first_statement)
def p_statement_list(s, ctx, first_statement = 0):
# Parse a series of statements separated by newlines.
pos = s.position()
stats = []
while s.sy not in ('DEDENT', 'EOF'):
stat = p_statement(s, ctx, first_statement = first_statement)
if isinstance(stat, Nodes.PassStatNode):
continue
stats.append(stat)
first_statement = False
if not stats:
return Nodes.PassStatNode(pos)
elif len(stats) == 1:
return stats[0]
else:
return Nodes.StatListNode(pos, stats = stats)
def p_suite(s, ctx=Ctx()):
return p_suite_with_docstring(s, ctx, with_doc_only=False)[1]
def p_suite_with_docstring(s, ctx, with_doc_only=False):
s.expect(':')
doc = None
if s.sy == 'NEWLINE':
s.next()
s.expect_indent()
if with_doc_only:
doc = p_doc_string(s)
body = p_statement_list(s, ctx)
s.expect_dedent()
else:
if ctx.api:
s.error("'api' not allowed with this statement", fatal=False)
if ctx.level in ('module', 'class', 'function', 'other'):
body = p_simple_statement_list(s, ctx)
else:
body = p_pass_statement(s)
s.expect_newline("Syntax error in declarations")
if not with_doc_only:
doc, body = _extract_docstring(body)
return doc, body
def p_positional_and_keyword_args(s, end_sy_set, templates = None):
"""
Parses positional and keyword arguments. end_sy_set
    should contain any s.sy that terminates the argument list.
    Argument expansion (* and **) is not allowed.
Returns: (positional_args, keyword_args)
"""
positional_args = []
keyword_args = []
pos_idx = 0
while s.sy not in end_sy_set:
if s.sy == '*' or s.sy == '**':
s.error('Argument expansion not allowed here.', fatal=False)
parsed_type = False
if s.sy == 'IDENT' and s.peek()[0] == '=':
ident = s.systring
s.next() # s.sy is '='
s.next()
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
keyword_node = ExprNodes.IdentifierStringNode(
arg.pos, value = EncodedString(ident))
keyword_args.append((keyword_node, arg))
was_keyword = True
else:
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
positional_args.append(arg)
pos_idx += 1
if len(keyword_args) > 0:
s.error("Non-keyword arg following keyword arg",
pos=arg.pos)
if s.sy != ',':
if s.sy not in end_sy_set:
if parsed_type:
s.error("Unmatched %s" % " or ".join(end_sy_set))
break
s.next()
return positional_args, keyword_args
def p_c_base_type(s, self_flag = 0, nonempty = 0, templates = None):
# If self_flag is true, this is the base type for the
# self argument of a C method of an extension type.
if s.sy == '(':
return p_c_complex_base_type(s, templates = templates)
else:
return p_c_simple_base_type(s, self_flag, nonempty = nonempty, templates = templates)
def p_calling_convention(s):
if s.sy == 'IDENT' and s.systring in calling_convention_words:
result = s.systring
s.next()
return result
else:
return ""
calling_convention_words = cython.declare(
set, set(["__stdcall", "__cdecl", "__fastcall"]))
def p_c_complex_base_type(s, templates = None):
# s.sy == '('
pos = s.position()
s.next()
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
s.expect(')')
type_node = Nodes.CComplexBaseTypeNode(pos,
base_type = base_type, declarator = declarator)
if s.sy == '[':
if is_memoryviewslice_access(s):
type_node = p_memoryviewslice_access(s, type_node)
else:
type_node = p_buffer_or_template(s, type_node, templates)
return type_node
def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
#print "p_c_simple_base_type: self_flag =", self_flag, nonempty
is_basic = 0
signed = 1
longness = 0
complex = 0
module_path = []
pos = s.position()
if not s.sy == 'IDENT':
error(pos, "Expected an identifier, found '%s'" % s.sy)
if s.systring == 'const':
s.next()
base_type = p_c_base_type(s,
self_flag = self_flag, nonempty = nonempty, templates = templates)
return Nodes.CConstTypeNode(pos, base_type = base_type)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
if s.sy == 'IDENT' and s.systring in special_basic_c_types:
signed, longness = special_basic_c_types[s.systring]
name = s.systring
s.next()
else:
signed, longness = p_sign_and_longness(s)
if s.sy == 'IDENT' and s.systring in basic_c_type_names:
name = s.systring
s.next()
else:
name = 'int' # long [int], short [int], long [int] complex, etc.
if s.sy == 'IDENT' and s.systring == 'complex':
complex = 1
s.next()
elif looking_at_dotted_name(s):
#print "p_c_simple_base_type: looking_at_type_name at", s.position()
name = s.systring
s.next()
while s.sy == '.':
module_path.append(name)
s.next()
name = p_ident(s)
else:
name = s.systring
s.next()
if nonempty and s.sy != 'IDENT':
# Make sure this is not a declaration of a variable or function.
if s.sy == '(':
s.next()
if (s.sy == '*' or s.sy == '**' or s.sy == '&'
or (s.sy == 'IDENT' and s.systring in calling_convention_words)):
s.put_back('(', '(')
else:
s.put_back('(', '(')
s.put_back('IDENT', name)
name = None
elif s.sy not in ('*', '**', '[', '&'):
s.put_back('IDENT', name)
name = None
type_node = Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
complex = complex, longness = longness,
is_self_arg = self_flag, templates = templates)
    # Handle a trailing '[...]' as a buffer, template or memoryview slice declaration here.
if s.sy == '[':
if is_memoryviewslice_access(s):
type_node = p_memoryviewslice_access(s, type_node)
else:
type_node = p_buffer_or_template(s, type_node, templates)
if s.sy == '.':
s.next()
name = p_ident(s)
type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
return type_node
def p_buffer_or_template(s, base_type_node, templates):
# s.sy == '['
pos = s.position()
s.next()
# Note that buffer_positional_options_count=1, so the only positional argument is dtype.
# For templated types, all parameters are types.
positional_args, keyword_args = (
p_positional_and_keyword_args(s, (']',), templates)
)
s.expect(']')
if s.sy == '[':
base_type_node = p_buffer_or_template(s, base_type_node, templates)
keyword_dict = ExprNodes.DictNode(pos,
key_value_pairs = [
ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)
for key, value in keyword_args
])
result = Nodes.TemplatedTypeNode(pos,
positional_args = positional_args,
keyword_args = keyword_dict,
base_type_node = base_type_node)
return result
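# Illustrative bracketed type arguments parsed above (the numpy names assume a
# "cimport numpy as np" and are only an example):
#   np.ndarray[np.int32_t, ndim=2]  -> buffer type: dtype is the positional
#                                      option, other options are keyword args
#   vector[int]                     -> templated C++ type: parameters are types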
def p_bracketed_base_type(s, base_type_node, nonempty, empty):
# s.sy == '['
if empty and not nonempty:
# sizeof-like thing. Only anonymous C arrays allowed (int[SIZE]).
return base_type_node
elif not empty and nonempty:
# declaration of either memoryview slice or buffer.
if is_memoryviewslice_access(s):
return p_memoryviewslice_access(s, base_type_node)
else:
return p_buffer_or_template(s, base_type_node, None)
# return p_buffer_access(s, base_type_node)
elif not empty and not nonempty:
# only anonymous C arrays and memoryview slice arrays here. We
# disallow buffer declarations for now, due to ambiguity with anonymous
# C arrays.
if is_memoryviewslice_access(s):
return p_memoryviewslice_access(s, base_type_node)
else:
return base_type_node
def is_memoryviewslice_access(s):
# s.sy == '['
# a memoryview slice declaration is distinguishable from a buffer access
# declaration by the first entry in the bracketed list. The buffer will
# not have an unnested colon in the first entry; the memoryview slice will.
saved = [(s.sy, s.systring)]
s.next()
retval = False
if s.systring == ':':
retval = True
elif s.sy == 'INT':
saved.append((s.sy, s.systring))
s.next()
if s.sy == ':':
retval = True
for sv in saved[::-1]:
s.put_back(*sv)
return retval
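# Illustrative distinction made above, based on the first bracketed entry:
#   double[:, ::1]              -> memoryview slice (first entry contains a colon)
#   np.ndarray[double, ndim=2]  -> buffer access (no unnested colon in the first entry)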
def p_memoryviewslice_access(s, base_type_node):
# s.sy == '['
pos = s.position()
s.next()
subscripts, _ = p_subscript_list(s)
# make sure each entry in subscripts is a slice
for subscript in subscripts:
if len(subscript) < 2:
s.error("An axis specification in memoryview declaration does not have a ':'.")
s.expect(']')
indexes = make_slice_nodes(pos, subscripts)
result = Nodes.MemoryViewSliceTypeNode(pos,
base_type_node = base_type_node,
axes = indexes)
return result
def looking_at_name(s):
return s.sy == 'IDENT' and not s.systring in calling_convention_words
def looking_at_expr(s):
if s.systring in base_type_start_words:
return False
elif s.sy == 'IDENT':
is_type = False
name = s.systring
dotted_path = []
s.next()
while s.sy == '.':
s.next()
dotted_path.append(s.systring)
s.expect('IDENT')
saved = s.sy, s.systring
if s.sy == 'IDENT':
is_type = True
elif s.sy == '*' or s.sy == '**':
s.next()
is_type = s.sy in (')', ']')
s.put_back(*saved)
elif s.sy == '(':
s.next()
is_type = s.sy == '*'
s.put_back(*saved)
elif s.sy == '[':
s.next()
is_type = s.sy == ']'
s.put_back(*saved)
dotted_path.reverse()
for p in dotted_path:
s.put_back('IDENT', p)
s.put_back('.', '.')
s.put_back('IDENT', name)
return not is_type and saved[0]
else:
return True
def looking_at_base_type(s):
#print "looking_at_base_type?", s.sy, s.systring, s.position()
return s.sy == 'IDENT' and s.systring in base_type_start_words
def looking_at_dotted_name(s):
if s.sy == 'IDENT':
name = s.systring
s.next()
result = s.sy == '.'
s.put_back('IDENT', name)
return result
else:
return 0
def looking_at_call(s):
"See if we're looking at a.b.c("
# Don't mess up the original position, so save and restore it.
# Unfortunately there's no good way to handle this, as a subsequent call
# to next() will not advance the position until it reads a new token.
position = s.start_line, s.start_col
result = looking_at_expr(s) == u'('
if not result:
s.start_line, s.start_col = position
return result
basic_c_type_names = cython.declare(
set, set(["void", "char", "int", "float", "double", "bint"]))
special_basic_c_types = cython.declare(dict, {
# name : (signed, longness)
"Py_UNICODE" : (0, 0),
"Py_UCS4" : (0, 0),
"Py_ssize_t" : (2, 0),
"ssize_t" : (2, 0),
"size_t" : (0, 0),
"ptrdiff_t" : (2, 0),
})
sign_and_longness_words = cython.declare(
set, set(["short", "long", "signed", "unsigned"]))
base_type_start_words = cython.declare(
set,
basic_c_type_names
| sign_and_longness_words
| set(special_basic_c_types))
struct_enum_union = cython.declare(
set, set(["struct", "union", "enum", "packed"]))
def p_sign_and_longness(s):
signed = 1
longness = 0
while s.sy == 'IDENT' and s.systring in sign_and_longness_words:
if s.systring == 'unsigned':
signed = 0
elif s.systring == 'signed':
signed = 2
elif s.systring == 'short':
longness = -1
elif s.systring == 'long':
longness += 1
s.next()
return signed, longness
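# Illustrative results of the sign/longness scan above:
#   'unsigned long long' -> signed = 0, longness = 2   (type name defaults to 'int')
#   'signed short'       -> signed = 2, longness = -1
#   'long double'        -> signed = 1, longness = 1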
def p_opt_cname(s):
literal = p_opt_string_literal(s, 'u')
if literal is not None:
cname = EncodedString(literal)
cname.encoding = s.source_encoding
else:
cname = None
return cname
def p_c_declarator(s, ctx = Ctx(), empty = 0, is_type = 0, cmethod_flag = 0,
assignable = 0, nonempty = 0,
calling_convention_allowed = 0):
# If empty is true, the declarator must be empty. If nonempty is true,
# the declarator must be nonempty. Otherwise we don't care.
# If cmethod_flag is true, then if this declarator declares
# a function, it's a C method of an extension type.
pos = s.position()
if s.sy == '(':
s.next()
if s.sy == ')' or looking_at_name(s):
base = Nodes.CNameDeclaratorNode(pos, name = EncodedString(u""), cname = None)
result = p_c_func_declarator(s, pos, ctx, base, cmethod_flag)
else:
result = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
nonempty = nonempty,
calling_convention_allowed = 1)
s.expect(')')
else:
result = p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
assignable, nonempty)
if not calling_convention_allowed and result.calling_convention and s.sy != '(':
error(s.position(), "%s on something that is not a function"
% result.calling_convention)
while s.sy in ('[', '('):
pos = s.position()
if s.sy == '[':
result = p_c_array_declarator(s, result)
else: # sy == '('
s.next()
result = p_c_func_declarator(s, pos, ctx, result, cmethod_flag)
cmethod_flag = 0
return result
def p_c_array_declarator(s, base):
pos = s.position()
s.next() # '['
if s.sy != ']':
dim = p_testlist(s)
else:
dim = None
s.expect(']')
return Nodes.CArrayDeclaratorNode(pos, base = base, dimension = dim)
def p_c_func_declarator(s, pos, ctx, base, cmethod_flag):
# Opening paren has already been skipped
args = p_c_arg_list(s, ctx, cmethod_flag = cmethod_flag,
nonempty_declarators = 0)
ellipsis = p_optional_ellipsis(s)
s.expect(')')
nogil = p_nogil(s)
exc_val, exc_check = p_exception_value_clause(s)
with_gil = p_with_gil(s)
return Nodes.CFuncDeclaratorNode(pos,
base = base, args = args, has_varargs = ellipsis,
exception_value = exc_val, exception_check = exc_check,
nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil)
supported_overloaded_operators = cython.declare(set, set([
'+', '-', '*', '/', '%',
'++', '--', '~', '|', '&', '^', '<<', '>>', ',',
'==', '!=', '>=', '>', '<=', '<',
'[]', '()', '!',
]))
def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
assignable, nonempty):
pos = s.position()
calling_convention = p_calling_convention(s)
if s.sy == '*':
s.next()
if s.systring == 'const':
const_pos = s.position()
s.next()
const_base = p_c_declarator(s, ctx, empty = empty,
is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable,
nonempty = nonempty)
base = Nodes.CConstDeclaratorNode(const_pos, base = const_base)
else:
base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable, nonempty = nonempty)
result = Nodes.CPtrDeclaratorNode(pos,
base = base)
elif s.sy == '**': # scanner returns this as a single token
s.next()
base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable, nonempty = nonempty)
result = Nodes.CPtrDeclaratorNode(pos,
base = Nodes.CPtrDeclaratorNode(pos,
base = base))
elif s.sy == '&':
s.next()
base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable, nonempty = nonempty)
result = Nodes.CReferenceDeclaratorNode(pos, base = base)
else:
rhs = None
if s.sy == 'IDENT':
name = EncodedString(s.systring)
if empty:
error(s.position(), "Declarator should be empty")
s.next()
cname = p_opt_cname(s)
if name != 'operator' and s.sy == '=' and assignable:
s.next()
rhs = p_test(s)
else:
if nonempty:
error(s.position(), "Empty declarator")
name = ""
cname = None
if cname is None and ctx.namespace is not None and nonempty:
cname = ctx.namespace + "::" + name
if name == 'operator' and ctx.visibility == 'extern' and nonempty:
op = s.sy
if [1 for c in op if c in '+-*/<=>!%&|([^~,']:
s.next()
# Handle diphthong operators.
if op == '(':
s.expect(')')
op = '()'
elif op == '[':
s.expect(']')
op = '[]'
elif op in ('-', '+', '|', '&') and s.sy == op:
op *= 2 # ++, --, ...
s.next()
elif s.sy == '=':
op += s.sy # +=, -=, ...
s.next()
if op not in supported_overloaded_operators:
s.error("Overloading operator '%s' not yet supported." % op,
fatal=False)
name += op
result = Nodes.CNameDeclaratorNode(pos,
name = name, cname = cname, default = rhs)
result.calling_convention = calling_convention
return result
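# Illustrative operator declarators accepted above in extern/cppclass
# declarations, including the two-character "diphthong" forms:
#   operator[]   operator()   operator+   operator++   operator==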
def p_nogil(s):
if s.sy == 'IDENT' and s.systring == 'nogil':
s.next()
return 1
else:
return 0
def p_with_gil(s):
if s.sy == 'with':
s.next()
s.expect_keyword('gil')
return 1
else:
return 0
def p_exception_value_clause(s):
exc_val = None
exc_check = 0
if s.sy == 'except':
s.next()
if s.sy == '*':
exc_check = 1
s.next()
elif s.sy == '+':
exc_check = '+'
s.next()
if s.sy == 'IDENT':
name = s.systring
s.next()
exc_val = p_name(s, name)
else:
if s.sy == '?':
exc_check = 1
s.next()
exc_val = p_test(s)
return exc_val, exc_check
c_arg_list_terminators = cython.declare(set, set(['*', '**', '.', ')']))
def p_c_arg_list(s, ctx = Ctx(), in_pyfunc = 0, cmethod_flag = 0,
nonempty_declarators = 0, kw_only = 0, annotated = 1):
# Comma-separated list of C argument declarations, possibly empty.
# May have a trailing comma.
args = []
is_self_arg = cmethod_flag
while s.sy not in c_arg_list_terminators:
args.append(p_c_arg_decl(s, ctx, in_pyfunc, is_self_arg,
nonempty = nonempty_declarators, kw_only = kw_only,
annotated = annotated))
if s.sy != ',':
break
s.next()
is_self_arg = 0
return args
def p_optional_ellipsis(s):
if s.sy == '.':
expect_ellipsis(s)
return 1
else:
return 0
def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0,
kw_only = 0, annotated = 1):
pos = s.position()
not_none = or_none = 0
default = None
annotation = None
if s.in_python_file:
# empty type declaration
base_type = Nodes.CSimpleBaseTypeNode(pos,
name = None, module_path = [],
is_basic_c_type = 0, signed = 0,
complex = 0, longness = 0,
is_self_arg = cmethod_flag, templates = None)
else:
base_type = p_c_base_type(s, cmethod_flag, nonempty = nonempty)
declarator = p_c_declarator(s, ctx, nonempty = nonempty)
if s.sy in ('not', 'or') and not s.in_python_file:
kind = s.sy
s.next()
if s.sy == 'IDENT' and s.systring == 'None':
s.next()
else:
s.error("Expected 'None'")
if not in_pyfunc:
error(pos, "'%s None' only allowed in Python functions" % kind)
or_none = kind == 'or'
not_none = kind == 'not'
if annotated and s.sy == ':':
s.next()
annotation = p_test(s)
if s.sy == '=':
s.next()
if 'pxd' in ctx.level:
if s.sy not in ['*', '?']:
error(pos, "default values cannot be specified in pxd files, use ? or *")
default = ExprNodes.BoolNode(1)
s.next()
else:
default = p_test(s)
return Nodes.CArgDeclNode(pos,
base_type = base_type,
declarator = declarator,
not_none = not_none,
or_none = or_none,
default = default,
annotation = annotation,
kw_only = kw_only)
def p_api(s):
if s.sy == 'IDENT' and s.systring == 'api':
s.next()
return 1
else:
return 0
def p_cdef_statement(s, ctx):
pos = s.position()
ctx.visibility = p_visibility(s, ctx.visibility)
ctx.api = ctx.api or p_api(s)
if ctx.api:
if ctx.visibility not in ('private', 'public'):
error(pos, "Cannot combine 'api' with '%s'" % ctx.visibility)
if (ctx.visibility == 'extern') and s.sy == 'from':
return p_cdef_extern_block(s, pos, ctx)
elif s.sy == 'import':
s.next()
return p_cdef_extern_block(s, pos, ctx)
elif p_nogil(s):
ctx.nogil = 1
if ctx.overridable:
error(pos, "cdef blocks cannot be declared cpdef")
return p_cdef_block(s, ctx)
elif s.sy == ':':
if ctx.overridable:
error(pos, "cdef blocks cannot be declared cpdef")
return p_cdef_block(s, ctx)
elif s.sy == 'class':
if ctx.level not in ('module', 'module_pxd'):
error(pos, "Extension type definition not allowed here")
if ctx.overridable:
error(pos, "Extension types cannot be declared cpdef")
return p_c_class_definition(s, pos, ctx)
elif s.sy == 'IDENT' and s.systring == 'cppclass':
return p_cpp_class_definition(s, pos, ctx)
elif s.sy == 'IDENT' and s.systring in struct_enum_union:
if ctx.level not in ('module', 'module_pxd'):
error(pos, "C struct/union/enum definition not allowed here")
if ctx.overridable:
error(pos, "C struct/union/enum cannot be declared cpdef")
return p_struct_enum(s, pos, ctx)
elif s.sy == 'IDENT' and s.systring == 'fused':
return p_fused_definition(s, pos, ctx)
else:
return p_c_func_or_var_declaration(s, pos, ctx)
def p_cdef_block(s, ctx):
return p_suite(s, ctx(cdef_flag = 1))
def p_cdef_extern_block(s, pos, ctx):
if ctx.overridable:
error(pos, "cdef extern blocks cannot be declared cpdef")
include_file = None
s.expect('from')
if s.sy == '*':
s.next()
else:
include_file = p_string_literal(s, 'u')[2]
ctx = ctx(cdef_flag = 1, visibility = 'extern')
if s.systring == "namespace":
s.next()
ctx.namespace = p_string_literal(s, 'u')[2]
if p_nogil(s):
ctx.nogil = 1
body = p_suite(s, ctx)
return Nodes.CDefExternNode(pos,
include_file = include_file,
body = body,
namespace = ctx.namespace)
def p_c_enum_definition(s, pos, ctx):
# s.sy == ident 'enum'
s.next()
if s.sy == 'IDENT':
name = s.systring
s.next()
cname = p_opt_cname(s)
if cname is None and ctx.namespace is not None:
cname = ctx.namespace + "::" + name
else:
name = None
cname = None
items = None
s.expect(':')
items = []
if s.sy != 'NEWLINE':
p_c_enum_line(s, ctx, items)
else:
s.next() # 'NEWLINE'
s.expect_indent()
while s.sy not in ('DEDENT', 'EOF'):
p_c_enum_line(s, ctx, items)
s.expect_dedent()
return Nodes.CEnumDefNode(
pos, name = name, cname = cname, items = items,
typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
api = ctx.api, in_pxd = ctx.level == 'module_pxd')
def p_c_enum_line(s, ctx, items):
if s.sy != 'pass':
p_c_enum_item(s, ctx, items)
while s.sy == ',':
s.next()
if s.sy in ('NEWLINE', 'EOF'):
break
p_c_enum_item(s, ctx, items)
else:
s.next()
s.expect_newline("Syntax error in enum item list")
def p_c_enum_item(s, ctx, items):
pos = s.position()
name = p_ident(s)
cname = p_opt_cname(s)
if cname is None and ctx.namespace is not None:
cname = ctx.namespace + "::" + name
value = None
if s.sy == '=':
s.next()
value = p_test(s)
items.append(Nodes.CEnumDefItemNode(pos,
name = name, cname = cname, value = value))
def p_c_struct_or_union_definition(s, pos, ctx):
packed = False
if s.systring == 'packed':
packed = True
s.next()
if s.sy != 'IDENT' or s.systring != 'struct':
s.expected('struct')
# s.sy == ident 'struct' or 'union'
kind = s.systring
s.next()
name = p_ident(s)
cname = p_opt_cname(s)
if cname is None and ctx.namespace is not None:
cname = ctx.namespace + "::" + name
attributes = None
if s.sy == ':':
s.next()
s.expect('NEWLINE')
s.expect_indent()
attributes = []
body_ctx = Ctx()
while s.sy != 'DEDENT':
if s.sy != 'pass':
attributes.append(
p_c_func_or_var_declaration(s, s.position(), body_ctx))
else:
s.next()
s.expect_newline("Expected a newline")
s.expect_dedent()
else:
s.expect_newline("Syntax error in struct or union definition")
return Nodes.CStructOrUnionDefNode(pos,
name = name, cname = cname, kind = kind, attributes = attributes,
typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
api = ctx.api, in_pxd = ctx.level == 'module_pxd', packed = packed)
def p_fused_definition(s, pos, ctx):
"""
c(type)def fused my_fused_type:
...
"""
# s.systring == 'fused'
if ctx.level not in ('module', 'module_pxd'):
error(pos, "Fused type definition not allowed here")
s.next()
name = p_ident(s)
s.expect(":")
s.expect_newline()
s.expect_indent()
types = []
while s.sy != 'DEDENT':
if s.sy != 'pass':
#types.append(p_c_declarator(s))
types.append(p_c_base_type(s)) #, nonempty=1))
else:
s.next()
s.expect_newline()
s.expect_dedent()
if not types:
error(pos, "Need at least one type")
return Nodes.FusedTypeNode(pos, name=name, types=types)
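# Illustrative fused type definition parsed above (the type name is made up):
#   ctypedef fused numeric:
#       int
#       double
#       float complex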
def p_struct_enum(s, pos, ctx):
if s.systring == 'enum':
return p_c_enum_definition(s, pos, ctx)
else:
return p_c_struct_or_union_definition(s, pos, ctx)
def p_visibility(s, prev_visibility):
pos = s.position()
visibility = prev_visibility
if s.sy == 'IDENT' and s.systring in ('extern', 'public', 'readonly'):
visibility = s.systring
if prev_visibility != 'private' and visibility != prev_visibility:
s.error("Conflicting visibility options '%s' and '%s'"
% (prev_visibility, visibility), fatal=False)
s.next()
return visibility
def p_c_modifiers(s):
if s.sy == 'IDENT' and s.systring in ('inline',):
modifier = s.systring
s.next()
return [modifier] + p_c_modifiers(s)
return []
def p_c_func_or_var_declaration(s, pos, ctx):
cmethod_flag = ctx.level in ('c_class', 'c_class_pxd')
modifiers = p_c_modifiers(s)
base_type = p_c_base_type(s, nonempty = 1, templates = ctx.templates)
declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag,
assignable = 1, nonempty = 1)
declarator.overridable = ctx.overridable
if s.sy == 'IDENT' and s.systring == 'const' and ctx.level == 'cpp_class':
s.next()
is_const_method = 1
else:
is_const_method = 0
if s.sy == ':':
if ctx.level not in ('module', 'c_class', 'module_pxd', 'c_class_pxd', 'cpp_class') and not ctx.templates:
s.error("C function definition not allowed here")
doc, suite = p_suite_with_docstring(s, Ctx(level='function'))
result = Nodes.CFuncDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
declarator = declarator,
body = suite,
doc = doc,
modifiers = modifiers,
api = ctx.api,
overridable = ctx.overridable,
is_const_method = is_const_method)
else:
#if api:
# s.error("'api' not allowed with variable declaration")
if is_const_method:
declarator.is_const_method = is_const_method
declarators = [declarator]
while s.sy == ',':
s.next()
if s.sy == 'NEWLINE':
break
declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag,
assignable = 1, nonempty = 1)
declarators.append(declarator)
doc_line = s.start_line + 1
s.expect_newline("Syntax error in C variable declaration")
if ctx.level in ('c_class', 'c_class_pxd') and s.start_line == doc_line:
doc = p_doc_string(s)
else:
doc = None
result = Nodes.CVarDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
declarators = declarators,
in_pxd = ctx.level in ('module_pxd', 'c_class_pxd'),
doc = doc,
api = ctx.api,
modifiers = modifiers,
overridable = ctx.overridable)
return result
def p_ctypedef_statement(s, ctx):
# s.sy == 'ctypedef'
pos = s.position()
s.next()
visibility = p_visibility(s, ctx.visibility)
api = p_api(s)
ctx = ctx(typedef_flag = 1, visibility = visibility)
if api:
ctx.api = 1
if s.sy == 'class':
return p_c_class_definition(s, pos, ctx)
elif s.sy == 'IDENT' and s.systring in struct_enum_union:
return p_struct_enum(s, pos, ctx)
elif s.sy == 'IDENT' and s.systring == 'fused':
return p_fused_definition(s, pos, ctx)
else:
base_type = p_c_base_type(s, nonempty = 1)
declarator = p_c_declarator(s, ctx, is_type = 1, nonempty = 1)
s.expect_newline("Syntax error in ctypedef statement")
return Nodes.CTypeDefNode(
pos, base_type = base_type,
declarator = declarator,
visibility = visibility, api = api,
in_pxd = ctx.level == 'module_pxd')
def p_decorators(s):
decorators = []
while s.sy == 'DECORATOR':
pos = s.position()
s.next()
decstring = p_dotted_name(s, as_allowed=0)[2]
names = decstring.split('.')
decorator = ExprNodes.NameNode(pos, name=EncodedString(names[0]))
for name in names[1:]:
decorator = ExprNodes.AttributeNode(pos,
attribute=EncodedString(name),
obj=decorator)
if s.sy == '(':
decorator = p_call(s, decorator)
decorators.append(Nodes.DecoratorNode(pos, decorator=decorator))
s.expect_newline("Expected a newline after decorator")
return decorators
def p_def_statement(s, decorators=None):
# s.sy == 'def'
pos = s.position()
s.next()
name = EncodedString( p_ident(s) )
s.expect('(')
args, star_arg, starstar_arg = p_varargslist(s, terminator=')')
s.expect(')')
if p_nogil(s):
error(pos, "Python function cannot be declared nogil")
return_type_annotation = None
if s.sy == '->':
s.next()
return_type_annotation = p_test(s)
doc, body = p_suite_with_docstring(s, Ctx(level='function'))
return Nodes.DefNode(pos, name = name, args = args,
star_arg = star_arg, starstar_arg = starstar_arg,
doc = doc, body = body, decorators = decorators,
return_type_annotation = return_type_annotation)
def p_varargslist(s, terminator=')', annotated=1):
args = p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1,
annotated = annotated)
star_arg = None
starstar_arg = None
if s.sy == '*':
s.next()
if s.sy == 'IDENT':
star_arg = p_py_arg_decl(s, annotated=annotated)
if s.sy == ',':
s.next()
args.extend(p_c_arg_list(s, in_pyfunc = 1,
nonempty_declarators = 1, kw_only = 1, annotated = annotated))
elif s.sy != terminator:
s.error("Syntax error in Python function argument list")
if s.sy == '**':
s.next()
starstar_arg = p_py_arg_decl(s, annotated=annotated)
return (args, star_arg, starstar_arg)
def p_py_arg_decl(s, annotated = 1):
pos = s.position()
name = p_ident(s)
annotation = None
if annotated and s.sy == ':':
s.next()
annotation = p_test(s)
return Nodes.PyArgDeclNode(pos, name = name, annotation = annotation)
def p_class_statement(s, decorators):
# s.sy == 'class'
pos = s.position()
s.next()
class_name = EncodedString( p_ident(s) )
class_name.encoding = s.source_encoding
arg_tuple = None
keyword_dict = None
starstar_arg = None
if s.sy == '(':
positional_args, keyword_args, star_arg, starstar_arg = \
p_call_parse_args(s, allow_genexp = False)
arg_tuple, keyword_dict = p_call_build_packed_args(
pos, positional_args, keyword_args, star_arg, None)
if arg_tuple is None:
# XXX: empty arg_tuple
arg_tuple = ExprNodes.TupleNode(pos, args=[])
doc, body = p_suite_with_docstring(s, Ctx(level='class'))
return Nodes.PyClassDefNode(
pos, name=class_name,
bases=arg_tuple,
keyword_args=keyword_dict,
starstar_arg=starstar_arg,
doc=doc, body=body, decorators=decorators,
force_py3_semantics=s.context.language_level >= 3)
def p_c_class_definition(s, pos, ctx):
# s.sy == 'class'
s.next()
module_path = []
class_name = p_ident(s)
while s.sy == '.':
s.next()
module_path.append(class_name)
class_name = p_ident(s)
if module_path and ctx.visibility != 'extern':
error(pos, "Qualified class name only allowed for 'extern' C class")
if module_path and s.sy == 'IDENT' and s.systring == 'as':
s.next()
as_name = p_ident(s)
else:
as_name = class_name
objstruct_name = None
typeobj_name = None
base_class_module = None
base_class_name = None
if s.sy == '(':
s.next()
base_class_path = [p_ident(s)]
while s.sy == '.':
s.next()
base_class_path.append(p_ident(s))
if s.sy == ',':
s.error("C class may only have one base class", fatal=False)
s.expect(')')
base_class_module = ".".join(base_class_path[:-1])
base_class_name = base_class_path[-1]
if s.sy == '[':
if ctx.visibility not in ('public', 'extern') and not ctx.api:
error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
objstruct_name, typeobj_name = p_c_class_options(s)
if s.sy == ':':
if ctx.level == 'module_pxd':
body_level = 'c_class_pxd'
else:
body_level = 'c_class'
doc, body = p_suite_with_docstring(s, Ctx(level=body_level))
else:
s.expect_newline("Syntax error in C class definition")
doc = None
body = None
if ctx.visibility == 'extern':
if not module_path:
error(pos, "Module name required for 'extern' C class")
if typeobj_name:
error(pos, "Type object name specification not allowed for 'extern' C class")
elif ctx.visibility == 'public':
if not objstruct_name:
error(pos, "Object struct name specification required for 'public' C class")
if not typeobj_name:
error(pos, "Type object name specification required for 'public' C class")
elif ctx.visibility == 'private':
if ctx.api:
if not objstruct_name:
error(pos, "Object struct name specification required for 'api' C class")
if not typeobj_name:
error(pos, "Type object name specification required for 'api' C class")
else:
error(pos, "Invalid class visibility '%s'" % ctx.visibility)
return Nodes.CClassDefNode(pos,
visibility = ctx.visibility,
typedef_flag = ctx.typedef_flag,
api = ctx.api,
module_name = ".".join(module_path),
class_name = class_name,
as_name = as_name,
base_class_module = base_class_module,
base_class_name = base_class_name,
objstruct_name = objstruct_name,
typeobj_name = typeobj_name,
in_pxd = ctx.level == 'module_pxd',
doc = doc,
body = body)
def p_c_class_options(s):
objstruct_name = None
typeobj_name = None
s.expect('[')
while 1:
if s.sy != 'IDENT':
break
if s.systring == 'object':
s.next()
objstruct_name = p_ident(s)
elif s.systring == 'type':
s.next()
typeobj_name = p_ident(s)
if s.sy != ',':
break
s.next()
s.expect(']', "Expected 'object' or 'type'")
return objstruct_name, typeobj_name
def p_property_decl(s):
pos = s.position()
s.next() # 'property'
name = p_ident(s)
doc, body = p_suite_with_docstring(
s, Ctx(level='property'), with_doc_only=True)
return Nodes.PropertyNode(pos, name=name, doc=doc, body=body)
def p_ignorable_statement(s):
"""
Parses any kind of ignorable statement that is allowed in .pxd files.
"""
if s.sy == 'BEGIN_STRING':
pos = s.position()
string_node = p_atom(s)
if s.sy != 'EOF':
s.expect_newline("Syntax error in string")
return Nodes.ExprStatNode(pos, expr=string_node)
return None
def p_doc_string(s):
if s.sy == 'BEGIN_STRING':
pos = s.position()
kind, bytes_result, unicode_result = p_cat_string_literal(s)
if s.sy != 'EOF':
s.expect_newline("Syntax error in doc string")
if kind in ('u', ''):
return unicode_result
warning(pos, "Python 3 requires docstrings to be unicode strings")
return bytes_result
else:
return None
def _extract_docstring(node):
"""
Extract a docstring from a statement or from the first statement
in a list. Remove the statement if found. Return a tuple
(plain-docstring or None, node).
"""
doc_node = None
if node is None:
pass
elif isinstance(node, Nodes.ExprStatNode):
if node.expr.is_string_literal:
doc_node = node.expr
node = Nodes.StatListNode(node.pos, stats=[])
elif isinstance(node, Nodes.StatListNode) and node.stats:
stats = node.stats
if isinstance(stats[0], Nodes.ExprStatNode):
if stats[0].expr.is_string_literal:
doc_node = stats[0].expr
del stats[0]
if doc_node is None:
doc = None
elif isinstance(doc_node, ExprNodes.BytesNode):
warning(node.pos,
"Python 3 requires docstrings to be unicode strings")
doc = doc_node.value
elif isinstance(doc_node, ExprNodes.StringNode):
doc = doc_node.unicode_value
if doc is None:
doc = doc_node.value
else:
doc = doc_node.value
return doc, node
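# Illustrative only (not part of the original parser): the same "pull the
# leading string literal out as the docstring" idea that _extract_docstring
# implements, shown with Python's own ast module instead of Cython's node
# classes.
def _ast_docstring_demo():
    import ast
    module = ast.parse('"""Module docstring."""\nx = 1\n')
    return ast.get_docstring(module)  # -> 'Module docstring.'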
def p_code(s, level=None, ctx=Ctx):
body = p_statement_list(s, ctx(level = level), first_statement = 1)
if s.sy != 'EOF':
s.error("Syntax error in statement [%s,%s]" % (
repr(s.sy), repr(s.systring)))
return body
_match_compiler_directive_comment = cython.declare(object, re.compile(
r"^#\s*cython\s*:\s*((\w|[.])+\s*=.*)$").match)
def p_compiler_directive_comments(s):
result = {}
while s.sy == 'commentline':
m = _match_compiler_directive_comment(s.systring)
if m:
directives = m.group(1).strip()
try:
result.update(Options.parse_directive_list(
directives, ignore_unknown=True))
except ValueError, e:
s.error(e.args[0], fatal=False)
s.next()
return result
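# Illustrative only (not part of the original parser): the kind of comment line
# that _match_compiler_directive_comment recognizes, and the directive text it
# captures.
def _directive_comment_demo():
    m = _match_compiler_directive_comment("# cython: boundscheck=False")
    return m.group(1) if m else None  # -> 'boundscheck=False'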
def p_module(s, pxd, full_module_name, ctx=Ctx):
pos = s.position()
directive_comments = p_compiler_directive_comments(s)
s.parse_comments = False
if 'language_level' in directive_comments:
s.context.set_language_level(directive_comments['language_level'])
doc = p_doc_string(s)
if pxd:
level = 'module_pxd'
else:
level = 'module'
body = p_statement_list(s, ctx(level=level), first_statement = 1)
if s.sy != 'EOF':
s.error("Syntax error in statement [%s,%s]" % (
repr(s.sy), repr(s.systring)))
return ModuleNode(pos, doc = doc, body = body,
full_module_name = full_module_name,
directive_comments = directive_comments)
def p_cpp_class_definition(s, pos, ctx):
# s.sy == 'cppclass'
s.next()
module_path = []
class_name = p_ident(s)
cname = p_opt_cname(s)
if cname is None and ctx.namespace is not None:
cname = ctx.namespace + "::" + class_name
if s.sy == '.':
error(pos, "Qualified class name not allowed C++ class")
if s.sy == '[':
s.next()
templates = [p_ident(s)]
while s.sy == ',':
s.next()
templates.append(p_ident(s))
s.expect(']')
else:
templates = None
if s.sy == '(':
s.next()
base_classes = [p_c_base_type(s, templates = templates)]
while s.sy == ',':
s.next()
base_classes.append(p_c_base_type(s, templates = templates))
s.expect(')')
else:
base_classes = []
if s.sy == '[':
error(s.position(), "Name options not allowed for C++ class")
nogil = p_nogil(s)
if s.sy == ':':
s.next()
s.expect('NEWLINE')
s.expect_indent()
attributes = []
body_ctx = Ctx(visibility = ctx.visibility, level='cpp_class', nogil=nogil or ctx.nogil)
body_ctx.templates = templates
while s.sy != 'DEDENT':
if s.systring == 'cppclass':
attributes.append(
p_cpp_class_definition(s, s.position(), body_ctx))
elif s.sy != 'pass':
attributes.append(
p_c_func_or_var_declaration(s, s.position(), body_ctx))
else:
s.next()
s.expect_newline("Expected a newline")
s.expect_dedent()
else:
attributes = None
s.expect_newline("Syntax error in C++ class definition")
return Nodes.CppClassNode(pos,
name = class_name,
cname = cname,
base_classes = base_classes,
visibility = ctx.visibility,
in_pxd = ctx.level == 'module_pxd',
attributes = attributes,
templates = templates)
#----------------------------------------------
#
# Debugging
#
#----------------------------------------------
def print_parse_tree(f, node, level, key = None):
from types import ListType, TupleType
from Nodes import Node
ind = " " * level
if node:
f.write(ind)
if key:
f.write("%s: " % key)
t = type(node)
if t is tuple:
f.write("(%s @ %s\n" % (node[0], node[1]))
for i in xrange(2, len(node)):
print_parse_tree(f, node[i], level+1)
f.write("%s)\n" % ind)
return
elif isinstance(node, Node):
try:
tag = node.tag
except AttributeError:
tag = node.__class__.__name__
f.write("%s @ %s\n" % (tag, node.pos))
for name, value in node.__dict__.items():
if name != 'tag' and name != 'pos':
print_parse_tree(f, value, level+1, name)
return
elif t is list:
f.write("[\n")
for i in xrange(len(node)):
print_parse_tree(f, node[i], level+1)
f.write("%s]\n" % ind)
return
f.write("%s%s\n" % (ind, node))
| bsd-3-clause |
sigmavirus24/pip | pip/vcs/git.py | 3 | 11208 | from __future__ import absolute_import
import logging
import tempfile
import os.path
from pip.compat import samefile
from pip.exceptions import BadCommand
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._vendor.packaging.version import parse as parse_version
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def get_git_version(self):
VERSION_PFX = 'git version '
version = self.run_command(['version'], show_stdout=False)
if version.startswith(VERSION_PFX):
version = version[len(VERSION_PFX):].split()[0]
else:
version = ''
        # get only the first 3 components of the git version because
        # on Windows it is x.y.z.windows.t, and that parses as a
        # LegacyVersion, which is always smaller than a Version.
version = '.'.join(version.split('.')[:3])
return parse_version(version)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_short_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warning(
"Could not find a tag or branch '%s', assuming commit.", rev,
)
return rev_options
def check_version(self, dest, rev_options):
"""
Compare the current sha to the ref. ref may be a branch or tag name,
but current rev will always point to a sha. This means that a branch
or tag will never compare as True. So this ultimately only matches
against exact shas.
"""
return self.get_revision(dest).startswith(rev_options[0])
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
self.run_command(['checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
if self.get_git_version() >= parse_version('1.9.0'):
# fetch tags in addition to everything else
self.run_command(['fetch', '-q', '--tags'], cwd=dest)
else:
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(
rev_options[0], dest, rev_options,
)
self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.check_version(dest, rev_options):
self.run_command(
['checkout', '-q'] + rev_options,
cwd=dest,
)
#: repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
"""Return URL of the first remote encountered."""
remotes = self.run_command(
['config', '--get-regexp', 'remote\..*\.url'],
show_stdout=False, cwd=location)
remotes = remotes.splitlines()
found_remote = remotes[0]
for remote in remotes:
if remote.startswith('remote.origin.url '):
found_remote = remote
break
url = found_remote.split(' ')[1]
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_full_refs(self, location):
"""Yields tuples of (commit, ref) for branches and tags"""
output = self.run_command(['show-ref'],
show_stdout=False, cwd=location)
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
yield commit.strip(), ref.strip()
def is_ref_remote(self, ref):
return ref.startswith('refs/remotes/')
def is_ref_branch(self, ref):
return ref.startswith('refs/heads/')
def is_ref_tag(self, ref):
return ref.startswith('refs/tags/')
def is_ref_commit(self, ref):
"""A ref is a commit sha if it is not anything else"""
return not any((
self.is_ref_remote(ref),
self.is_ref_branch(ref),
self.is_ref_tag(ref),
))
# Should deprecate `get_refs` since it's ambiguous
def get_refs(self, location):
return self.get_short_refs(location)
def get_short_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
rv = {}
for commit, ref in self.get_full_refs(location):
ref_name = None
if self.is_ref_remote(ref):
ref_name = ref[len('refs/remotes/'):]
elif self.is_ref_branch(ref):
ref_name = ref[len('refs/heads/'):]
elif self.is_ref_tag(ref):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit
return rv
def _get_subdirectory(self, location):
"""Return the relative path of setup.py to the git repo root."""
# find the repo root
git_dir = self.run_command(['rev-parse', '--git-dir'],
show_stdout=False, cwd=location).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
root_dir = os.path.join(git_dir, '..')
# find setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
# relative path of setup.py to repo root
if samefile(root_dir, location):
return None
return os.path.relpath(location, root_dir)
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
subdirectory = self._get_subdirectory(location)
if subdirectory:
req += '&subdirectory=' + subdirectory
return req
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH, they sometimes don't
        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def controls_location(cls, location):
if super(Git, cls).controls_location(location):
return True
try:
r = cls().run_command(['rev-parse'],
cwd=location,
show_stdout=False,
on_returncode='ignore')
return not r
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return False
vcs.register(Git)
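# Illustrative only (not part of the original module): the two string tricks the
# Git class above relies on, shown on hypothetical inputs with no git checkout
# required.
if __name__ == '__main__':
    # get_url_rev(): stub URLs get a temporary ssh:// scheme for parsing,
    # which is stripped again afterwards.
    stub = 'git+git@example.com:owner/repo.git'
    with_scheme = stub.replace('git+', 'git+ssh://')
    assert with_scheme.replace('ssh://', '') == stub
    # get_git_version(): only the first three version components are kept, so
    # a Windows-style version string still parses as a regular Version.
    windows_version = '2.7.1.windows.2'
    print('.'.join(windows_version.split('.')[:3]))  # 2.7.1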
| mit |
shakamunyi/tensorflow | tensorflow/python/kernel_tests/sparse_slice_op_test.py | 47 | 13161 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseSliceOpTest(test.TestCase):
def _SparseTensor_4x6(self):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
# [ |11| |13|14| |16]
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
# ['a0'| |'b0'| ]
# [ |'c0'| |'d0']
# [ | |'e0'| ]
# slice(:,:, 1)
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
[1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
[2, 2, 1]]).astype(np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x4x2(
))
def testSliceMatrixRows(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
self.assertAllEqual(sp_tensor0.indices.eval(), [[0, 0], [0, 2], [0, 4],
[0, 5], [1, 1], [1, 3],
[1, 4]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 6])
self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 0], [0, 3], [0, 5],
[1, 0], [1, 2], [1, 3],
[1, 5]])
self.assertAllEqual(sp_tensor1.values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 6])
def testSliceMatrixUnevenCols(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 3])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 3], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2],
[4, 1]])
self.assertAllEqual(sp_tensor0.values.eval(),
[0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 3])
self.assertAllEqual(sp_tensor1.indices.eval(),
[[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor1.values.eval(),
[4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor2.indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor2.values.eval(), [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 11, 20, 30, 41])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor1.indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sp_tensor1.values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor2.indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
self.assertAllEqual(sp_tensor2.values.eval(), [4, 5, 14, 25, 35, 44])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
self.assertAllEqual(sp_tensor3.indices.eval(), [[1, 0], [4, 0]])
self.assertAllEqual(sp_tensor3.values.eval(), [16, 46])
self.assertAllEqual(sp_tensor3.dense_shape.eval(), [5, 1])
def testSliceMatrixUnevenRows(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
self.assertAllEqual(sp_tensor0.values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [3, 7])
self.assertAllEqual(sp_tensor1.indices.eval(),
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4],
[1, 6]])
self.assertAllEqual(sp_tensor1.values.eval(),
[30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
self.assertAllEqual(sp_tensor0.indices.eval(),
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6]])
self.assertAllEqual(sp_tensor0.values.eval(),
[0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensor1.values.eval(),
[20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 1], [0, 4],
[0, 6]])
self.assertAllEqual(sp_tensor2.values.eval(), [41, 44, 46])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 7])
return
def testSliceAllRows(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [1, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
self.assertAllEqual(sp_tensor0.indices.eval(), [[0, 0], [0, 2], [0, 4],
[0, 5]])
self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5])
self.assertAllEqual(sp_tensor0.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 1], [0, 3], [0,
4]])
self.assertAllEqual(sp_tensor1.values.eval(), [11, 13, 14])
self.assertAllEqual(sp_tensor1.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 0], [0, 3], [0,
5]])
self.assertAllEqual(sp_tensor2.values.eval(), [20, 23, 25])
self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 6])
self.assertAllEqual(sp_tensor3.indices.eval(), [[0, 0], [0, 2], [0, 3],
[0, 5]])
self.assertAllEqual(sp_tensor3.values.eval(), [30, 32, 33, 35])
self.assertAllEqual(sp_tensor3.dense_shape.eval(), [1, 6])
def testSliceColumns(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 2])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])
self.assertAllEqual(sparse_tensor0.indices.eval(), [[0, 0], [1, 1],
[2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor0.values.eval(), [0, 11, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensor1.indices.eval(),
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sparse_tensor1.values.eval(), [2, 13, 23, 32, 33])
self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 2])
self.assertAllEqual(sparse_tensor2.indices.eval(),
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
self.assertAllEqual(sparse_tensor2.values.eval(), [4, 5, 14, 25, 35])
self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 2])
def testSliceAllColumns(self):
with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 1])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 1], [4, 1])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 2], [4, 1])
sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
self.assertAllEqual(sparse_tensor0.indices.eval(), [[0, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensor0.values.eval(), [0, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor1.indices.eval(), [[1, 0]])
self.assertAllEqual(sparse_tensor1.values.eval(), [11])
self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor2.indices.eval(), [[0, 0], [3, 0]])
self.assertAllEqual(sparse_tensor2.values.eval(), [2, 32])
self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor3.indices.eval(), [[1, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor3.values.eval(), [13, 23, 33])
self.assertAllEqual(sparse_tensor4.indices.eval(), [[0, 0], [1, 0]])
self.assertAllEqual(sparse_tensor4.values.eval(), [4, 14])
self.assertAllEqual(sparse_tensor4.dense_shape.eval(), [4, 1])
self.assertAllEqual(sparse_tensor5.indices.eval(), [[0, 0], [2, 0],
[3, 0]])
self.assertAllEqual(sparse_tensor5.values.eval(), [5, 25, 35])
self.assertAllEqual(sparse_tensor5.dense_shape.eval(), [4, 1])
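# Illustrative only (not part of the original test): reconstructing the dense
# 4x6 matrix that _SparseTensor_4x6 encodes, using plain NumPy, to make the
# ASCII diagrams above concrete. The row slice [0:2] keeps exactly the values
# that testSliceMatrixRows expects from its first slice.
def _dense_4x6_demo():
  ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                  [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3], [3, 5]])
  val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35])
  dense = np.zeros((4, 6), dtype=np.int64)
  dense[ind[:, 0], ind[:, 1]] = val
  return dense[0:2, :]  # contains 0, 2, 4, 5, 11, 13, 14 at their positions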
if __name__ == '__main__':
test.main()
| apache-2.0 |
B-UMMI/INNUca | src/SPAdes-3.13.0-Linux/share/spades/pyyaml2/tokens.py | 985 | 2573 |
class Token(object):
def __init__(self, start_mark, end_mark):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [key for key in self.__dict__
if not key.endswith('_mark')]
attributes.sort()
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
for key in attributes])
return '%s(%s)' % (self.__class__.__name__, arguments)
#class BOMToken(Token):
# id = '<byte order mark>'
class DirectiveToken(Token):
id = '<directive>'
def __init__(self, name, value, start_mark, end_mark):
self.name = name
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class DocumentStartToken(Token):
id = '<document start>'
class DocumentEndToken(Token):
id = '<document end>'
class StreamStartToken(Token):
id = '<stream start>'
def __init__(self, start_mark=None, end_mark=None,
encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndToken(Token):
id = '<stream end>'
class BlockSequenceStartToken(Token):
id = '<block sequence start>'
class BlockMappingStartToken(Token):
id = '<block mapping start>'
class BlockEndToken(Token):
id = '<block end>'
class FlowSequenceStartToken(Token):
id = '['
class FlowMappingStartToken(Token):
id = '{'
class FlowSequenceEndToken(Token):
id = ']'
class FlowMappingEndToken(Token):
id = '}'
class KeyToken(Token):
id = '?'
class ValueToken(Token):
id = ':'
class BlockEntryToken(Token):
id = '-'
class FlowEntryToken(Token):
id = ','
class AliasToken(Token):
id = '<alias>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class AnchorToken(Token):
id = '<anchor>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class TagToken(Token):
id = '<tag>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class ScalarToken(Token):
id = '<scalar>'
def __init__(self, value, plain, start_mark, end_mark, style=None):
self.value = value
self.plain = plain
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
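# Illustrative only (not part of the original module): Token.__repr__ lists
# every attribute except the *_mark ones, so tokens print compactly like this.
if __name__ == '__main__':
    print(DirectiveToken('YAML', (1, 1), None, None))
    # DirectiveToken(name='YAML', value=(1, 1))
    print(ScalarToken('hello', True, None, None, style="'"))
    # ScalarToken(plain=True, style="'", value='hello')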
| gpl-3.0 |
swatilodha/coala | tests/parsing/StringProcessing/UnescapedSearchForTest.py | 17 | 6174 |
from coalib.parsing.StringProcessing import unescaped_search_for
from tests.parsing.StringProcessing.StringProcessingTestBase import (
StringProcessingTestBase)
class UnescapedSearchForTest(StringProcessingTestBase):
# Match either "out1" or "out2".
test_basic_pattern = "out1|out2"
# These are the expected results for the zero-group of the
# returned MatchObject's.
test_basic_expected_results = [
[r"out1", r"out2"],
[r"out1", r"out2"],
[r"out1", r"out2"],
[r"out1", r"out2"],
[r"out1", r"out2"],
[r"out1", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2", r"out2"],
[r"out1", r"out2"],
[],
[r"out1", r"out2"],
[],
[]]
@staticmethod
def list_zero_group(it):
"""
        Collects all MatchObject elements from the given iterator and extracts
        the complete match (group 0) from each of them.
:param it: The input iterator where to collect from.
"""
return [elem.group(0) for elem in it]
# Test the unescaped_search_for() function.
def test_basic(self):
expected_results = self.test_basic_expected_results
self.assertResultsEqual(
unescaped_search_for,
{(self.test_basic_pattern, test_string, 0, 0, True): result
for test_string, result in zip(self.test_strings,
expected_results)},
self.list_zero_group)
# Test unescaped_search_for() with a simple pattern.
def test_simple_pattern(self):
expected_results = [
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
6 * [r"'"],
[],
[],
[],
[]]
self.assertResultsEqual(
unescaped_search_for,
{(r"'", test_string, 0, 0, use_regex): result
for test_string, result in zip(self.test_strings,
expected_results)
for use_regex in [True, False]},
self.list_zero_group)
# Test unescaped_search_for() with an empty pattern.
def test_empty_pattern(self):
        # Since an empty pattern can also be escaped, the result contains
        # special cases. In particular, because we check the completely
        # matched string (and not only the matched pattern itself), the
        # matched escape characters also end up inside the result list
        # consumed from the internal regex of unescaped_search_for().
expected_results = [
38 * [r""],
38 * [r""],
38 * [r""],
37 * [r""],
38 * [r""],
38 * [r""],
39 * [r""],
38 * [r""],
37 * [r""],
38 * [r""],
37 * [r""],
38 * [r""],
37 * [r""],
39 * [r""],
[r""],
15 * [r""],
[r""],
2 * [r""]]
self.assertResultsEqual(
unescaped_search_for,
{(r"", test_string, 0, 0, use_regex): result
for test_string, result in zip(self.test_strings,
expected_results)
for use_regex in [True, False]},
self.list_zero_group)
# Test unescaped_search_for() for its max_match parameter.
def test_max_match(self):
search_pattern = self.test_basic_pattern
expected_master_results = self.test_basic_expected_results
self.assertResultsEqual(
unescaped_search_for,
{(search_pattern, test_string, 0, max_match, True): result
for max_match in [1, 2, 3, 4, 5, 6, 987, 1122334455]
for test_string, result in zip(
self.test_strings,
[elem[0: max_match] for elem in expected_master_results])},
self.list_zero_group)
# Test unescaped_search_for() for its max_match parameter with matches
# that are also escaped.
def test_max_match_escaping_flaw(self):
expected_master_results = [
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
2 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
4 * [r"'"],
6 * [r"'"],
[],
[],
[],
[]]
self.assertResultsEqual(
unescaped_search_for,
{(r"'", test_string, 0, max_match, use_regex): result
for max_match in [1, 2, 3, 4, 5, 6, 100]
for test_string, result in zip(
self.test_strings,
[elem[0: max_match] for elem in expected_master_results])
for use_regex in [True, False]},
self.list_zero_group)
# Test unescaped_search_for() with regexes disabled.
def test_disabled_regex(self):
search_pattern = r"\'"
expected_results = [
[],
[search_pattern],
[],
[],
[search_pattern],
[],
[],
[search_pattern],
[search_pattern],
[],
[],
[],
[],
[],
[],
[],
[],
[]]
self.assertResultsEqual(
unescaped_search_for,
{(search_pattern, test_string, 0, 0, False): result
for test_string, result in zip(self.test_strings,
expected_results)},
self.list_zero_group)
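# Illustrative only (not part of the original tests): a minimal standalone call,
# assuming coalib is importable and using the same positional arguments the
# tests above pass (pattern, string, two numeric options, use_regex).
if __name__ == '__main__':
    matches = unescaped_search_for(r"'", r"a\'b'c", 0, 0, False)
    print([m.group(0) for m in matches])
    # Expected, based on the behaviour the tests above encode: ["'"] -- only
    # the unescaped quote is found, the backslash-escaped one is skipped.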
| agpl-3.0 |
drexly/openhgsenti | lib/django/contrib/gis/db/backends/base/models.py | 434 | 7111 | import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(
r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)'
r'(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)",'
r'"(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,'
r'AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$'
)
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception as msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception as msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, inverse flattening) when GDAL is
        available; the regex fallback returns only
        (semimajor axis, inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
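# Illustrative only (not part of the original module): how the no-GDAL regex
# fallback extracts spheroid parameters from a WKT string. The WKT below is a
# plausible WGS 84 definition used purely as sample input.
def _spheroid_regex_demo():
    import re
    spheroid_regex = re.compile(
        r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),'
        r'(?P<flattening>\d{3}\.\d+),')
    wkt = ('GEOGCS["WGS 84",DATUM["WGS_1984",'
           'SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],'
           'AUTHORITY["EPSG","6326"]]')
    m = spheroid_regex.match(wkt)
    return m.group('name'), m.group('major'), m.group('flattening')
    # -> ('WGS 84', '6378137', '298.257223563')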
| apache-2.0 |