"""
Implement -f aka looponfailing for pytest.
NOTE that we try to avoid loading and depending on application modules
within the controlling process (the one that starts repeatedly test
processes) otherwise changes to source code can crash
the controlling process which should best never happen.
"""
from __future__ import print_function
import py
import pytest
import sys
import time
import execnet
def pytest_addoption(parser):
group = parser.getgroup("xdist", "distributed and subprocess testing")
group._addoption(
"-f",
"--looponfail",
action="store_true",
dest="looponfail",
default=False,
help="run tests in subprocess, wait for modified files "
"and re-run failing test set until all pass.",
)
def pytest_cmdline_main(config):
if config.getoption("looponfail"):
usepdb = config.getoption("usepdb", False) # a core option
if usepdb:
raise pytest.UsageError("--pdb is incompatible with --looponfail.")
looponfail_main(config)
return 2 # looponfail can only be stopped with ctrl-C anyway
def looponfail_main(config):
remotecontrol = RemoteControl(config)
rootdirs = config.getini("looponfailroots")
statrecorder = StatRecorder(rootdirs)
try:
while 1:
remotecontrol.loop_once()
if not remotecontrol.failures and remotecontrol.wasfailing:
# the last failures passed, let's immediately rerun all
continue
repr_pytest_looponfailinfo(
failreports=remotecontrol.failures, rootdirs=rootdirs
)
statrecorder.waitonchange(checkinterval=2.0)
except KeyboardInterrupt:
print()
class RemoteControl(object):
def __init__(self, config):
self.config = config
self.failures = []
def trace(self, *args):
if self.config.option.debug:
msg = " ".join([str(x) for x in args])
print("RemoteControl:", msg)
def initgateway(self):
return execnet.makegateway("popen")
def setup(self, out=None):
if out is None:
out = py.io.TerminalWriter()
if hasattr(self, "gateway"):
raise ValueError("already have gateway %r" % self.gateway)
self.trace("setting up worker session")
self.gateway = self.initgateway()
self.channel = channel = self.gateway.remote_exec(
init_worker_session,
args=self.config.args,
option_dict=vars(self.config.option),
)
remote_outchannel = channel.receive()
def write(s):
out._file.write(s)
out._file.flush()
remote_outchannel.setcallback(write)
def ensure_teardown(self):
if hasattr(self, "channel"):
if not self.channel.isclosed():
self.trace("closing", self.channel)
self.channel.close()
del self.channel
if hasattr(self, "gateway"):
self.trace("exiting", self.gateway)
self.gateway.exit()
del self.gateway
def runsession(self):
try:
self.trace("sending", self.failures)
self.channel.send(self.failures)
try:
return self.channel.receive()
except self.channel.RemoteError:
e = sys.exc_info()[1]
self.trace("ERROR", e)
raise
finally:
self.ensure_teardown()
def loop_once(self):
self.setup()
self.wasfailing = bool(self.failures)
result = self.runsession()
failures, reports, collection_failed = result
if collection_failed:
pass # "Collection failed, keeping previous failure set"
else:
uniq_failures = []
for failure in failures:
if failure not in uniq_failures:
uniq_failures.append(failure)
self.failures = uniq_failures
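# Protocol sketch (derived from RemoteControl.runsession above and
# WorkerFailSession.main below; not used by the implementation): the
# controller sends the list of failing node ids over the execnet channel
# and receives a 3-tuple back.
def _example_looponfail_roundtrip(channel, known_failures):
    """Illustrative only: one controller<->worker exchange."""
    channel.send(known_failures)  # e.g. ["test_mod.py::test_a"]
    trails, failreports, collection_failed = channel.receive()
    # `trails` are the node ids that failed this run, `failreports` are their
    # one-line crash summaries, and `collection_failed` tells the controller
    # to keep its previous failure set.
    return trails, failreports, collection_failed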
def repr_pytest_looponfailinfo(failreports, rootdirs):
tr = py.io.TerminalWriter()
if failreports:
tr.sep("#", "LOOPONFAILING", bold=True)
for report in failreports:
if report:
tr.line(report, red=True)
tr.sep("#", "waiting for changes", bold=True)
for rootdir in rootdirs:
tr.line("### Watching: %s" % (rootdir,), bold=True)
def init_worker_session(channel, args, option_dict):
import os
import sys
outchannel = channel.gateway.newchannel()
sys.stdout = sys.stderr = outchannel.makefile("w")
channel.send(outchannel)
# prune sys.path to not contain relative paths
newpaths = []
for p in sys.path:
if p:
if not os.path.isabs(p):
p = os.path.abspath(p)
newpaths.append(p)
sys.path[:] = newpaths
# fullwidth, hasmarkup = channel.receive()
from _pytest.config import Config
config = Config.fromdictargs(option_dict, list(args))
config.args = args
from xdist.looponfail import WorkerFailSession
WorkerFailSession(config, channel).main()
class WorkerFailSession(object):
def __init__(self, config, channel):
self.config = config
self.channel = channel
self.recorded_failures = []
self.collection_failed = False
config.pluginmanager.register(self)
config.option.looponfail = False
config.option.usepdb = False
def DEBUG(self, *args):
if self.config.option.debug:
print(" ".join(map(str, args)))
def pytest_collection(self, session):
self.session = session
self.trails = self.current_command
hook = self.session.ihook
try:
items = session.perform_collect(self.trails or None)
except pytest.UsageError:
items = session.perform_collect(None)
hook.pytest_collection_modifyitems(
session=session, config=session.config, items=items
)
hook.pytest_collection_finish(session=session)
return True
def pytest_runtest_logreport(self, report):
if report.failed:
self.recorded_failures.append(report)
def pytest_collectreport(self, report):
if report.failed:
self.recorded_failures.append(report)
self.collection_failed = True
def main(self):
self.DEBUG("WORKER: received configuration, waiting for command trails")
try:
command = self.channel.receive()
except KeyboardInterrupt:
return # in the worker we can't do much about this
self.DEBUG("received", command)
self.current_command = command
self.config.hook.pytest_cmdline_main(config=self.config)
trails, failreports = [], []
for rep in self.recorded_failures:
trails.append(rep.nodeid)
loc = rep.longrepr
loc = str(getattr(loc, "reprcrash", loc))
failreports.append(loc)
self.channel.send((trails, failreports, self.collection_failed))
class StatRecorder(object):
def __init__(self, rootdirlist):
self.rootdirlist = rootdirlist
self.statcache = {}
self.check() # snapshot state
def fil(self, p):
return p.check(file=1, dotfile=0) and p.ext != ".pyc"
def rec(self, p):
return p.check(dotfile=0)
def waitonchange(self, checkinterval=1.0):
while 1:
changed = self.check()
if changed:
return
time.sleep(checkinterval)
def check(self, removepycfiles=True): # noqa, too complex
changed = False
statcache = self.statcache
newstat = {}
for rootdir in self.rootdirlist:
for path in rootdir.visit(self.fil, self.rec):
oldstat = statcache.pop(path, None)
try:
newstat[path] = curstat = path.stat()
except py.error.ENOENT:
if oldstat:
changed = True
else:
if oldstat:
if (
oldstat.mtime != curstat.mtime
or oldstat.size != curstat.size
):
changed = True
print("# MODIFIED", path)
if removepycfiles and path.ext == ".py":
pycfile = path + "c"
if pycfile.check():
pycfile.remove()
else:
changed = True
if statcache:
changed = True
self.statcache = newstat
return changed
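# Illustrative sketch (standard-library only, not used by StatRecorder): the
# same change-detection idea expressed with os.stat. A file counts as
# modified when its mtime or size differs from the previous snapshot, which
# mirrors the comparison in StatRecorder.check above.
import os

def _example_snapshot(paths):
    """Hypothetical helper: map existing files to their os.stat results."""
    return {p: os.stat(p) for p in paths if os.path.isfile(p)}

def _example_has_changed(old, new):
    """Hypothetical helper: True if any file was added, removed or modified."""
    if set(old) != set(new):
        return True
    return any(old[p].st_mtime != new[p].st_mtime or
               old[p].st_size != new[p].st_size
               for p in new)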
from .. import app
from .. import db
from .. import util
from ..models import Post, Setting, get_settings
from flask_login import login_required
from flask import request, redirect, url_for, render_template, flash,\
has_request_context
import requests
import json
import urllib.parse
def register():
pass
@app.route('/authorize_facebook')
@login_required
def authorize_facebook():
import urllib.parse
import urllib.request
redirect_uri = url_for('authorize_facebook', _external=True)
params = {
'client_id': get_settings().facebook_app_id,
'redirect_uri': redirect_uri,
'scope': 'publish_stream,user_photos',
}
code = request.args.get('code')
if code:
params['code'] = code
params['client_secret'] = get_settings().facebook_app_secret
r = urllib.request.urlopen(
'https://graph.facebook.com/oauth/access_token?'
+ urllib.parse.urlencode(params))
payload = urllib.parse.parse_qs(r.read())
access_token = payload[b'access_token'][0].decode('ascii')
Setting.query.get('facebook_access_token').value = access_token
db.session.commit()
return redirect(url_for('edit_settings'))
else:
return redirect('https://graph.facebook.com/oauth/authorize?'
+ urllib.parse.urlencode(params))
@app.route('/share_on_facebook', methods=['GET', 'POST'])
@login_required
def share_on_facebook():
from .twitter import collect_images
if request.method == 'GET':
post = Post.load_by_id(request.args.get('id'))
preview = post.title + '\n\n' if post.title else ''
preview += format_markdown_as_facebook(post.content)
imgs = [urllib.parse.urljoin(get_settings().site_url, img)
for img in collect_images(post)]
albums = []
if imgs:
app.logger.debug('fetching user albums')
resp = requests.get(
'https://graph.facebook.com/v2.2/me/albums',
params={'access_token': get_settings().facebook_access_token})
resp.raise_for_status()
app.logger.debug('user albums response %s: %s', resp, resp.text)
albums = resp.json().get('data', [])
return render_template('admin/share_on_facebook.jinja2', post=post,
preview=preview, imgs=imgs, albums=albums)
try:
post_id = request.form.get('post_id')
preview = request.form.get('preview')
img_url = request.form.get('img')
post_type = request.form.get('post_type')
album_id = request.form.get('album')
if album_id == 'new':
album_id = create_album(
request.form.get('new_album_name'),
request.form.get('new_album_message'))
post = Post.load_by_id(post_id)
facebook_url = handle_new_or_edit(post, preview, img_url,
post_type, album_id)
db.session.commit()
if has_request_context():
flash('Shared on Facebook: <a href="{}">Original</a>, '
'<a href="{}">On Facebook</a><br/>'
.format(post.permalink, facebook_url))
return redirect(post.permalink)
except Exception as e:
if has_request_context():
app.logger.exception('posting to facebook')
flash('Share on Facebook Failed! Exception: {}'.format(e))
return redirect(url_for('index'))
class PersonTagger:
def __init__(self):
self.tags = []
self.taggable_friends = None
def get_taggable_friends(self):
if not self.taggable_friends:
r = requests.get(
'https://graph.facebook.com/v2.0/me/taggable_friends',
params={
'access_token': get_settings().facebook_access_token
})
self.taggable_friends = r.json()
return self.taggable_friends or {}
def __call__(self, fullname, displayname, entry, pos):
fbid = entry.get('facebook')
if fbid:
# return '@[' + fbid + ']'
self.tags.append(fbid)
return displayname
def create_album(name, msg):
app.logger.debug('creating new facebook album %s', name)
resp = requests.post(
'https://graph.facebook.com/v2.0/me/albums', data={
'access_token': get_settings().facebook_access_token,
'name': name,
'message': msg,
'privacy': json.dumps({'value': 'EVERYONE'}),
})
resp.raise_for_status()
app.logger.debug('new facebook album response: %s, %s', resp, resp.text)
return resp.json()['id']
def handle_new_or_edit(post, preview, img_url, post_type,
album_id):
app.logger.debug('publishing to facebook')
#TODO I cannot figure out how to tag people via the FB API
#tagger = PersonTagger()
#preview = util.autolink(preview, url_processor=None, person_processor=tagger)
post_args = {
'access_token': get_settings().facebook_access_token,
'message': preview.strip(),
'actions': json.dumps({'name': 'See Original',
'link': post.permalink}),
#'privacy': json.dumps({'value': 'SELF'}),
'privacy': json.dumps({'value': 'EVERYONE'}),
#'article': post.permalink,
}
if post.title:
post_args['name'] = post.title
is_photo = False
share_link = next(iter(post.repost_of), None)
if share_link:
post_args['link'] = share_link
elif img_url:
if post_type == 'photo':
is_photo = True # special case for posting photos
post_args['url'] = img_url
else:
# link back to the original post, and use the image
# as the preview image
post_args['link'] = post.permalink
post_args['picture'] = img_url
if is_photo:
app.logger.debug('Sending photo %s to album %s', post_args, album_id)
response = requests.post(
'https://graph.facebook.com/v2.0/{}/photos'.format(
album_id if album_id else 'me'),
data=post_args)
else:
app.logger.debug('Sending post %s', post_args)
response = requests.post('https://graph.facebook.com/v2.0/me/feed',
data=post_args)
response.raise_for_status()
app.logger.debug("Got response from facebook %s", response)
if 'json' in response.headers['content-type']:
result = response.json()
app.logger.debug('published to facebook. response {}'.format(result))
if result:
if is_photo:
facebook_photo_id = result['id']
facebook_post_id = result['post_id'] # actually the album
split = facebook_post_id.split('_', 1)
if split and len(split) == 2:
user_id, post_id = split
fb_url = 'https://facebook.com/{}/posts/{}'.format(
user_id, facebook_photo_id)
new_syndication = list(post.syndication)
new_syndication.append(fb_url)
post.syndication = new_syndication
return fb_url
else:
facebook_post_id = result['id']
split = facebook_post_id.split('_', 1)
if split and len(split) == 2:
user_id, post_id = split
fb_url = 'https://facebook.com/{}/posts/{}'.format(
user_id, post_id)
new_syndication = list(post.syndication)
new_syndication.append(fb_url)
post.syndication = new_syndication
return fb_url
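# Illustrative helper (hypothetical, not called above): Graph API post ids
# have the form "<user_id>_<post_id>"; handle_new_or_edit splits them to
# build the permalink stored in post.syndication. The same step in isolation:
def _example_fb_permalink(facebook_post_id):
    """Return https://facebook.com/<user>/posts/<post> for "<user>_<post>"."""
    parts = facebook_post_id.split('_', 1)
    if len(parts) == 2:
        user_id, post_id = parts
        return 'https://facebook.com/{}/posts/{}'.format(user_id, post_id)
    return None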
def format_markdown_as_facebook(data):
return util.format_as_text(
util.markdown_filter(
data, url_processor=None, person_processor=None))
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implementation of SQLAlchemy backend.
"""
import datetime
import warnings
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures that the request context has permission to access the
given project.
"""
if is_user_context(context):
if not context.project:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures that the request context has permission to access the
given user.
"""
if is_user_context(context):
if not context.user:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def can_read_deleted(context):
"""Indicates if the context has access to deleted objects."""
if not context:
return False
return context.read_deleted
def require_admin_context(f):
"""Decorator used to indicate that the method requires an
administrator context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator used to indicate that the method requires either
an administrator or normal user context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
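# Usage sketch (illustrative; _example_admin_only is a hypothetical name):
# both decorators expect the request context as the first positional argument
# of the wrapped function, which is what wrapper() inspects via args[0].
@require_admin_context
def _example_admin_only(context):
    """Hypothetical call that raises AdminRequired for non-admin contexts."""
    return context.is_admin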
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
if service_ref.topic == 'compute' and \
len(service_ref.compute_node) != 0:
for c in service_ref.compute_node:
c.delete(session=session)
@require_admin_context
def service_get(context, service_id, session=None):
if not session:
session = get_session()
result = session.query(models.Service).\
options(joinedload('compute_node')).\
filter_by(id=service_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get_all(context, disabled=None):
session = get_session()
query = session.query(models.Service).\
filter_by(deleted=can_read_deleted(context))
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
session = get_session()
return session.query(models.Service).\
filter_by(deleted=False).\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
session = get_session()
return session.query(models.Service).\
filter_by(deleted=False).\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
return session.query(models.Service).\
filter_by(deleted=False).\
filter_by(host=host).\
all()
@require_admin_context
def service_get_all_compute_by_host(context, host):
topic = 'compute'
session = get_session()
result = session.query(models.Service).\
options(joinedload('compute_node')).\
filter_by(deleted=False).\
filter_by(host=host).\
filter_by(topic=topic).\
all()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
filter_by(topic=topic).\
filter_by(deleted=False).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
@require_admin_context
def service_get_all_compute_sorted(context):
session = get_session()
with session.begin():
# NOTE(vish): The intended query is below
# SELECT services.*, COALESCE(inst_cores.instance_cores,
# 0)
# FROM services LEFT OUTER JOIN
# (SELECT host, SUM(instances.vcpus) AS instance_cores
# FROM instances GROUP BY host) AS inst_cores
# ON services.host = inst_cores.host
topic = 'compute'
label = 'instance_cores'
subq = session.query(models.Instance.host,
func.sum(models.Instance.vcpus).label(label)).\
filter_by(deleted=False).\
group_by(models.Instance.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_all_network_sorted(context):
session = get_session()
with session.begin():
topic = 'network'
label = 'network_count'
subq = session.query(models.Network.host,
func.count(models.Network.id).label(label)).\
filter_by(deleted=False).\
group_by(models.Network.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_all_volume_sorted(context):
session = get_session()
with session.begin():
topic = 'volume'
label = 'volume_gigabytes'
subq = session.query(models.Volume.host,
func.sum(models.Volume.size).label(label)).\
filter_by(deleted=False).\
group_by(models.Volume.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_by_args(context, host, binary):
session = get_session()
result = session.query(models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not FLAGS.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
service_ref.update(values)
service_ref.save(session=session)
###################
@require_admin_context
def compute_node_get(context, compute_id, session=None):
if not session:
session = get_session()
result = session.query(models.ComputeNode).\
filter_by(id=compute_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_create(context, values):
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@require_admin_context
def compute_node_update(context, compute_id, values):
session = get_session()
with session.begin():
compute_ref = compute_node_get(context, compute_id, session=session)
compute_ref.update(values)
compute_ref.save(session=session)
###################
@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()
result = session.query(models.Certificate).\
filter_by(id=certificate_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.CertificateNotFound(certificate_id=certificate_id)
return result
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_destroy(context, certificate_id):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
certificate_ref.delete(session=session)
@require_admin_context
def certificate_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(user_id=user_id).\
filter_by(deleted=False).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(_context, user_id, project_id):
session = get_session()
return session.query(models.Certificate).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
all()
@require_admin_context
def certificate_update(context, certificate_id, values):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save(session=session)
###################
@require_context
def floating_ip_allocate_address(context, host, project_id):
authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = session.query(models.FloatingIp).\
filter_by(host=host).\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise db.NoMoreAddresses()
floating_ip_ref['project_id'] = project_id
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
floating_ip_ref.save()
return floating_ip_ref['address']
@require_context
def floating_ip_count_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
return session.query(models.FloatingIp).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
filter_by(deleted=False).\
count()
@require_context
def floating_ip_fixed_ip_associate(context, floating_address, fixed_address):
session = get_session()
with session.begin():
# TODO(devcamcar): How to ensure floating_id belongs to user?
floating_ip_ref = floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = fixed_ip_get_by_address(context,
fixed_address,
session=session)
floating_ip_ref.fixed_ip = fixed_ip_ref
floating_ip_ref.save(session=session)
@require_context
def floating_ip_deallocate(context, address):
session = get_session()
with session.begin():
# TODO(devcamcar): How to ensure floating id belongs to user?
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref['project_id'] = None
floating_ip_ref['auto_assigned'] = False
floating_ip_ref.save(session=session)
@require_context
def floating_ip_destroy(context, address):
session = get_session()
with session.begin():
# TODO(devcamcar): Ensure address belongs to user.
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref.delete(session=session)
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
# TODO(devcamcar): Ensure address belongs to user.
# Does get_floating_ip_by_address handle this?
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref = floating_ip_ref.fixed_ip
if fixed_ip_ref:
fixed_ip_address = fixed_ip_ref['address']
else:
fixed_ip_address = None
floating_ip_ref.fixed_ip = None
floating_ip_ref.save(session=session)
return fixed_ip_address
@require_context
def floating_ip_set_auto_assigned(context, address):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref.auto_assigned = True
floating_ip_ref.save(session=session)
@require_admin_context
def floating_ip_get_all(context):
session = get_session()
return session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(deleted=False).\
all()
@require_admin_context
def floating_ip_get_all_by_host(context, host):
session = get_session()
return session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(host=host).\
filter_by(deleted=False).\
all()
@require_context
def floating_ip_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
return session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
filter_by(deleted=False).\
all()
@require_context
def floating_ip_get_by_address(context, address, session=None):
# TODO(devcamcar): Ensure the address belongs to user.
if not session:
session = get_session()
result = session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.network')).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
return result
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context, address, session)
for (key, value) in values.iteritems():
floating_ip_ref[key] = value
floating_ip_ref.save(session=session)
###################
@require_context
def fixed_ip_associate(context, address, instance_id):
session = get_session()
with session.begin():
instance = instance_get(context, instance_id, session=session)
fixed_ip_ref = session.query(models.FixedIp).\
filter_by(address=address).\
filter_by(deleted=False).\
filter_by(instance=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise db.NoMoreAddresses()
fixed_ip_ref.instance = instance
session.add(fixed_ip_ref)
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_id):
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = session.query(models.FixedIp).\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(deleted=False).\
filter_by(instance=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise db.NoMoreAddresses()
if not fixed_ip_ref.network:
fixed_ip_ref.network = network_get(context,
network_id,
session=session)
fixed_ip_ref.instance = instance_get(context,
instance_id,
session=session)
session.add(fixed_ip_ref)
return fixed_ip_ref['address']
@require_context
def fixed_ip_create(_context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
fixed_ip_ref.save()
return fixed_ip_ref['address']
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
fixed_ip_ref = fixed_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref.instance = None
fixed_ip_ref.save(session=session)
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
inner_q = session.query(models.Network.id).\
filter_by(host=host).\
subquery()
result = session.query(models.FixedIp).\
filter(models.FixedIp.network_id.in_(inner_q)).\
filter(models.FixedIp.updated_at < time).\
filter(models.FixedIp.instance_id != None).\
filter_by(allocated=0).\
update({'instance_id': None,
'leased': 0,
'updated_at': datetime.datetime.utcnow()},
synchronize_session='fetch')
return result
@require_admin_context
def fixed_ip_get_all(context, session=None):
if not session:
session = get_session()
result = session.query(models.FixedIp).all()
if not result:
raise exception.NoFloatingIpsDefined()
return result
@require_admin_context
def fixed_ip_get_all_by_host(context, host=None):
session = get_session()
result = session.query(models.FixedIp).\
join(models.FixedIp.instance).\
filter_by(state=1).\
filter_by(host=host).\
all()
if not result:
raise exception.NoFloatingIpsDefinedForHost(host=host)
return result
@require_context
def fixed_ip_get_by_address(context, address, session=None):
if not session:
session = get_session()
result = session.query(models.FixedIp).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload('network')).\
options(joinedload('instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(fixed_ip=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
return result
@require_context
def fixed_ip_get_instance(context, address):
fixed_ip_ref = fixed_ip_get_by_address(context, address)
return fixed_ip_ref.instance
@require_context
def fixed_ip_get_all_by_instance(context, instance_id):
session = get_session()
rv = session.query(models.FixedIp).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
if not rv:
raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id)
return rv
@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
mac = utils.to_mac(address)
result = session.query(models.Instance).\
filter_by(mac_address=mac).\
first()
return result
@require_admin_context
def fixed_ip_get_network(context, address):
fixed_ip_ref = fixed_ip_get_by_address(context, address)
return fixed_ip_ref.network
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
fixed_ip_ref = fixed_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref.update(values)
fixed_ip_ref.save(session=session)
###################
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
metadata = values.get('metadata')
metadata_refs = []
if metadata:
for k, v in metadata.iteritems():
metadata_ref = models.InstanceMetadata()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
values['metadata'] = metadata_refs
instance_ref = models.Instance()
instance_ref.update(values)
session = get_session()
with session.begin():
instance_ref.save(session=session)
return instance_ref
@require_admin_context
def instance_data_get_for_project(context, project_id):
session = get_session()
result = session.query(func.count(models.Instance.id),
func.sum(models.Instance.vcpus)).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_context
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def instance_get(context, instance_id, session=None):
if not session:
session = get_session()
result = None
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_admin_context
def instance_get_all(context):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_admin_context
def instance_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
all()
@require_admin_context
def instance_get_all_by_host(context, host):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_context
def instance_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_context
def instance_get_all_by_reservation(context, reservation_id):
session = get_session()
if is_admin_context(context):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
elif is_user_context(context):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=False).\
all()
@require_admin_context
def instance_get_project_vpn(context, project_id):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(image_id=str(FLAGS.vpn_image_id)).\
filter_by(deleted=can_read_deleted(context)).\
first()
@require_context
def instance_get_fixed_address(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
if not instance_ref.fixed_ip:
return None
return instance_ref.fixed_ip['address']
@require_context
def instance_get_fixed_address_v6(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
network_ref = network_get_by_instance(context, instance_id)
prefix = network_ref.cidr_v6
mac = instance_ref.mac_address
return utils.to_global_ipv6(prefix, mac)
@require_context
def instance_get_floating_address(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
if not instance_ref.fixed_ip:
return None
if not instance_ref.fixed_ip.floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return instance_ref.fixed_ip.floating_ips[0]['address']
@require_admin_context
def instance_set_state(context, instance_id, state, description=None):
# TODO(devcamcar): Move this out of models and into driver
from nova.compute import power_state
if not description:
description = power_state.name(state)
db.instance_update(context,
instance_id,
{'state': state,
'state_description': description})
@require_context
def instance_update(context, instance_id, values):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
instance_ref.update(values)
instance_ref.save(session=session)
return instance_ref
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance"""
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
security_group_ref = security_group_get(context,
security_group_id,
session=session)
instance_ref.security_groups += [security_group_ref]
instance_ref.save(session=session)
@require_context
def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
session = get_session()
result = session.query(models.Instance).\
filter_by(host=hostname).\
filter_by(project_id=proj_id).\
filter_by(deleted=False).\
value(func.sum(models.Instance.vcpus))
if not result:
return 0
return result
@require_context
def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
session = get_session()
result = session.query(models.Instance).\
filter_by(host=hostname).\
filter_by(project_id=proj_id).\
filter_by(deleted=False).\
value(func.sum(models.Instance.memory_mb))
if not result:
return 0
return result
@require_context
def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
session = get_session()
result = session.query(models.Instance).\
filter_by(host=hostname).\
filter_by(project_id=proj_id).\
filter_by(deleted=False).\
value(func.sum(models.Instance.local_gb))
if not result:
return 0
return result
@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
action_ref.update(values)
session = get_session()
with session.begin():
action_ref.save(session=session)
return action_ref
@require_admin_context
def instance_get_actions(context, instance_id):
"""Return the actions associated to the given instance id"""
session = get_session()
return session.query(models.InstanceActions).\
filter_by(instance_id=instance_id).\
all()
###################
@require_context
def key_pair_create(context, values):
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
@require_context
def key_pair_destroy(context, user_id, name):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
key_pair_ref = key_pair_get(context, user_id, name, session=session)
key_pair_ref.delete(session=session)
@require_context
def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
session.query(models.KeyPair).\
filter_by(user_id=user_id).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def key_pair_get(context, user_id, name, session=None):
authorize_user_context(context, user_id)
if not session:
session = get_session()
result = session.query(models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
return session.query(models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(deleted=False).\
all()
###################
@require_admin_context
def network_associate(context, project_id):
session = get_session()
with session.begin():
network_ref = session.query(models.Network).\
filter_by(deleted=False).\
filter_by(project_id=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref:
raise db.NoMoreNetworks()
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
@require_admin_context
def network_count(context):
session = get_session()
return session.query(models.Network).\
filter_by(deleted=can_read_deleted(context)).\
count()
@require_admin_context
def network_count_allocated_ips(context, network_id):
session = get_session()
return session.query(models.FixedIp).\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
filter_by(deleted=False).\
count()
@require_admin_context
def network_count_available_ips(context, network_id):
session = get_session()
return session.query(models.FixedIp).\
filter_by(network_id=network_id).\
filter_by(allocated=False).\
filter_by(reserved=False).\
filter_by(deleted=False).\
count()
@require_admin_context
def network_count_reserved_ips(context, network_id):
session = get_session()
return session.query(models.FixedIp).\
filter_by(network_id=network_id).\
filter_by(reserved=True).\
filter_by(deleted=False).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref.update(values)
try:
network_ref.save()
return network_ref
except IntegrityError:
return None
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id=network_id, \
session=session)
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
'host': None})
@require_admin_context
def network_disassociate_all(context):
session = get_session()
session.query(models.Network).\
update({'project_id': None,
'updated_at': literal_column('updated_at')})
@require_context
def network_get(context, network_id, session=None):
if not session:
session = get_session()
result = None
if is_admin_context(context):
result = session.query(models.Network).\
filter_by(id=network_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Network).\
filter_by(project_id=context.project_id).\
filter_by(id=network_id).\
filter_by(deleted=False).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_admin_context
def network_get_all(context):
session = get_session()
result = session.query(models.Network).all()
if not result:
raise exception.NoNetworksFound()
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
session = get_session()
return session.query(models.FixedIp).\
options(joinedload_all('instance')).\
filter_by(network_id=network_id).\
filter(models.FixedIp.instance_id != None).\
filter_by(deleted=False).\
all()
@require_admin_context
def network_get_by_bridge(context, bridge):
session = get_session()
result = session.query(models.Network).\
filter_by(bridge=bridge).\
filter_by(deleted=False).\
first()
if not result:
raise exception.NetworkNotFoundForBridge(bridge=bridge)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
session = get_session()
result = session.query(models.Network).\
filter_by(cidr=cidr).first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
filter_by(deleted=False).\
join(models.Network.fixed_ips).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
first()
if not rv:
raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@require_admin_context
def network_get_all_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
filter_by(deleted=False).\
join(models.Network.fixed_ips).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
if not rv:
raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = session.query(models.Network).\
filter_by(id=network_id).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id, session=session)
network_ref.update(values)
network_ref.save(session=session)
###################
@require_context
def project_get_network(context, project_id, associate=True):
session = get_session()
result = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
if not result:
if not associate:
return None
try:
return network_associate(context, project_id)
except IntegrityError:
# NOTE(vish): We hit this if there is a race and two
# processes are attempting to allocate the
# network at the same time
result = session.query(models.Network).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
return result
@require_context
def project_get_network_v6(context, project_id):
return project_get_network(context, project_id)
###################
def queue_get_for(_context, topic, physical_node_id):
# FIXME(ja): this should be servername?
return "%s.%s" % (topic, physical_node_id)
###################
@require_admin_context
def export_device_count(context):
session = get_session()
return session.query(models.ExportDevice).\
filter_by(deleted=can_read_deleted(context)).\
count()
@require_admin_context
def export_device_create_safe(context, values):
export_device_ref = models.ExportDevice()
export_device_ref.update(values)
try:
export_device_ref.save()
return export_device_ref
except IntegrityError:
return None
###################
@require_admin_context
def iscsi_target_count_by_host(context, host):
session = get_session()
return session.query(models.IscsiTarget).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(host=host).\
count()
@require_admin_context
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
for (key, value) in values.iteritems():
iscsi_target_ref[key] = value
try:
iscsi_target_ref.save()
return iscsi_target_ref
except IntegrityError:
return None
###################
@require_admin_context
def auth_token_destroy(context, token_id):
session = get_session()
with session.begin():
token_ref = auth_token_get(context, token_id, session=session)
token_ref.delete(session=session)
@require_admin_context
def auth_token_get(context, token_hash, session=None):
if session is None:
session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
raise exception.AuthTokenNotFound(token=token_hash)
return tk
@require_admin_context
def auth_token_update(context, token_hash, values):
session = get_session()
with session.begin():
token_ref = auth_token_get(context, token_hash, session=session)
token_ref.update(values)
token_ref.save(session=session)
@require_admin_context
def auth_token_create(_context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
return tk
###################
@require_admin_context
def quota_get(context, project_id, session=None):
if not session:
session = get_session()
result = session.query(models.Quota).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_admin_context
def quota_create(context, values):
quota_ref = models.Quota()
quota_ref.update(values)
quota_ref.save()
return quota_ref
@require_admin_context
def quota_update(context, project_id, values):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, session=session)
quota_ref.update(values)
quota_ref.save(session=session)
@require_admin_context
def quota_destroy(context, project_id):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, session=session)
quota_ref.delete(session=session)
###################
@require_admin_context
def volume_allocate_shelf_and_blade(context, volume_id):
session = get_session()
with session.begin():
export_device = session.query(models.ExportDevice).\
filter_by(volume=None).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not export_device:
raise db.NoMoreBlades()
export_device.volume_id = volume_id
session.add(export_device)
return (export_device.shelf_id, export_device.blade_id)
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
session = get_session()
with session.begin():
iscsi_target_ref = session.query(models.IscsiTarget).\
filter_by(volume=None).\
filter_by(host=host).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not iscsi_target_ref:
raise db.NoMoreTargets()
iscsi_target_ref.volume_id = volume_id
session.add(iscsi_target_ref)
return iscsi_target_ref.target_num
@require_admin_context
def volume_attached(context, volume_id, instance_id, mountpoint):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref.instance = instance_get(context, instance_id,
session=session)
volume_ref.save(session=session)
@require_context
def volume_create(context, values):
volume_ref = models.Volume()
volume_ref.update(values)
session = get_session()
with session.begin():
volume_ref.save(session=session)
return volume_ref
@require_admin_context
def volume_data_get_for_project(context, project_id):
session = get_session()
result = session.query(func.count(models.Volume.id),
func.sum(models.Volume.size)).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.ExportDevice).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
@require_admin_context
def volume_detached(context, volume_id):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref['status'] = 'available'
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
volume_ref.instance = None
volume_ref.save(session=session)
@require_context
def volume_get(context, volume_id, session=None):
if not session:
session = get_session()
result = None
if is_admin_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_admin_context
def volume_get_all(context):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_admin_context
def volume_get_all_by_host(context, host):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_admin_context
def volume_get_all_by_instance(context, instance_id):
session = get_session()
result = session.query(models.Volume).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
if not result:
raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
return result
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@require_admin_context
def volume_get_instance(context, volume_id):
session = get_session()
result = session.query(models.Volume).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload('instance')).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result.instance
@require_admin_context
def volume_get_shelf_and_blade(context, volume_id):
session = get_session()
result = session.query(models.ExportDevice).\
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id)
return (result.shelf_id, result.blade_id)
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
session = get_session()
result = session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref.update(values)
volume_ref.save(session=session)
###################
@require_context
def security_group_get_all(context):
session = get_session()
return session.query(models.SecurityGroup).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload_all('rules')).\
all()
@require_context
def security_group_get(context, security_group_id, session=None):
if not session:
session = get_session()
if is_admin_context(context):
result = session.query(models.SecurityGroup).\
filter_by(deleted=can_read_deleted(context),).\
filter_by(id=security_group_id).\
options(joinedload_all('rules')).\
first()
else:
result = session.query(models.SecurityGroup).\
filter_by(deleted=False).\
filter_by(id=security_group_id).\
filter_by(project_id=context.project_id).\
options(joinedload_all('rules')).\
first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name):
session = get_session()
result = session.query(models.SecurityGroup).\
filter_by(project_id=project_id).\
filter_by(name=group_name).\
filter_by(deleted=False).\
options(joinedload_all('rules')).\
options(joinedload_all('instances')).\
first()
if not result:
raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
session = get_session()
return session.query(models.SecurityGroup).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
options(joinedload_all('rules')).\
all()
@require_context
def security_group_get_by_instance(context, instance_id):
session = get_session()
return session.query(models.SecurityGroup).\
filter_by(deleted=False).\
options(joinedload_all('rules')).\
join(models.SecurityGroup.instances).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
all()
@require_context
def security_group_exists(context, project_id, group_name):
try:
group = security_group_get_by_name(context, project_id, group_name)
return group is not None
except exception.NotFound:
return False
@require_context
def security_group_create(context, values):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
security_group_ref.save()
return security_group_ref
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
update({'deleted': 1,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
###################
@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
if not session:
session = get_session()
if is_admin_context(context):
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(id=security_group_rule_id).\
first()
else:
# TODO(vish): Join to group and check for project_id
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=False).\
filter_by(id=security_group_rule_id).\
first()
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
session=None):
if not session:
session = get_session()
if is_admin_context(context):
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(parent_group_id=security_group_id).\
all()
else:
# TODO(vish): Join to group and check for project_id
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=False).\
filter_by(parent_group_id=security_group_id).\
all()
return result
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id,
session=None):
if not session:
session = get_session()
if is_admin_context(context):
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(group_id=security_group_id).\
all()
else:
result = session.query(models.SecurityGroupIngressRule).\
filter_by(deleted=False).\
filter_by(group_id=security_group_id).\
all()
return result
@require_context
def security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save()
return security_group_rule_ref
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
session = get_session()
with session.begin():
security_group_rule = security_group_rule_get(context,
security_group_rule_id,
session=session)
security_group_rule.delete(session=session)
###################
@require_admin_context
def user_get(context, id, session=None):
if not session:
session = get_session()
result = session.query(models.User).\
filter_by(id=id).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.UserNotFound(user_id=id)
return result
@require_admin_context
def user_get_by_access_key(context, access_key, session=None):
if not session:
session = get_session()
result = session.query(models.User).\
filter_by(access_key=access_key).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.AccessKeyNotFound(access_key=access_key)
return result
@require_admin_context
def user_create(_context, values):
user_ref = models.User()
user_ref.update(values)
user_ref.save()
return user_ref
@require_admin_context
def user_delete(context, id):
session = get_session()
with session.begin():
session.query(models.UserProjectAssociation).\
filter_by(user_id=id).\
delete()
session.query(models.UserRoleAssociation).\
filter_by(user_id=id).\
delete()
session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=id).\
delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
def user_get_all(context):
session = get_session()
return session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
all()
def project_create(_context, values):
project_ref = models.Project()
project_ref.update(values)
project_ref.save()
return project_ref
def project_add_member(context, project_id, user_id):
session = get_session()
with session.begin():
project_ref = project_get(context, project_id, session=session)
user_ref = user_get(context, user_id, session=session)
project_ref.members += [user_ref]
project_ref.save(session=session)
def project_get(context, id, session=None):
if not session:
session = get_session()
result = session.query(models.Project).\
filter_by(deleted=False).\
filter_by(id=id).\
options(joinedload_all('members')).\
first()
if not result:
raise exception.ProjectNotFound(project_id=id)
return result
def project_get_all(context):
session = get_session()
return session.query(models.Project).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload_all('members')).\
all()
def project_get_by_user(context, user_id):
session = get_session()
user = session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
if not user:
raise exception.UserNotFound(user_id=user_id)
return user.projects
def project_remove_member(context, project_id, user_id):
session = get_session()
project = project_get(context, project_id, session=session)
user = user_get(context, user_id, session=session)
if user in project.members:
project.members.remove(user)
project.save(session=session)
def user_update(context, user_id, values):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
user_ref.update(values)
user_ref.save(session=session)
def project_update(context, project_id, values):
session = get_session()
with session.begin():
project_ref = project_get(context, project_id, session=session)
project_ref.update(values)
project_ref.save(session=session)
def project_delete(context, id):
session = get_session()
with session.begin():
session.query(models.UserProjectAssociation).\
filter_by(project_id=id).\
delete()
session.query(models.UserProjectRoleAssociation).\
filter_by(project_id=id).\
delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
def user_get_roles(context, user_id):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
return [role.role for role in user_ref['roles']]
def user_get_roles_for_project(context, user_id, project_id):
session = get_session()
with session.begin():
res = session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
return [association.role for association in res]
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
filter_by(role=role).\
delete()
def user_remove_role(context, user_id, role):
session = get_session()
with session.begin():
res = session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(role=role).\
all()
        for role_ref in res:
            session.delete(role_ref)
def user_add_role(context, user_id, role):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
models.UserRoleAssociation(user=user_ref, role=role).\
save(session=session)
def user_add_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
project_ref = project_get(context, project_id, session=session)
models.UserProjectRoleAssociation(user_id=user_ref['id'],
project_id=project_ref['id'],
role=role).save(session=session)
###################
@require_admin_context
def host_get_networks(context, host):
session = get_session()
with session.begin():
return session.query(models.Network).\
filter_by(deleted=False).\
filter_by(host=host).\
all()
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = migration_get(context, id, session=session)
migration.update(values)
migration.save(session=session)
return migration
@require_admin_context
def migration_get(context, id, session=None):
if not session:
session = get_session()
result = session.query(models.Migration).\
filter_by(id=id).first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get_by_instance_and_status(context, instance_id, status):
session = get_session()
result = session.query(models.Migration).\
filter_by(instance_id=instance_id).\
filter_by(status=status).first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status=status)
return result
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
pool.save()
return pool
def console_pool_get(context, pool_id):
session = get_session()
result = session.query(models.ConsolePool).\
filter_by(deleted=False).\
filter_by(id=pool_id).\
first()
if not result:
raise exception.ConsolePoolNotFound(pool_id=pool_id)
return result
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
session = get_session()
result = session.query(models.ConsolePool).\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
filter_by(deleted=False).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(host=host,
console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
session = get_session()
return session.query(models.ConsolePool).\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(deleted=False).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# consoles are meant to be transient. (mdragon)
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
session = get_session()
result = session.query(models.Console).\
filter_by(pool_id=pool_id).\
filter_by(instance_id=instance_id).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id,
instance_id=instance_id)
return result
def console_get_all_by_instance(context, instance_id):
session = get_session()
results = session.query(models.Console).\
filter_by(instance_id=instance_id).\
options(joinedload('pool')).\
all()
return results
def console_get(context, console_id, instance_id=None):
session = get_session()
query = session.query(models.Console).\
filter_by(id=console_id)
if instance_id:
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
if instance_id:
raise exception.ConsoleNotFoundForInstance(console_id=console_id,
instance_id=instance_id)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def instance_type_create(_context, values):
try:
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save()
    except Exception as e:
raise exception.DBError(e)
return instance_type_ref
@require_context
def instance_type_get_all(context, inactive=False):
"""
Returns a dict describing all instance_types with name as key.
"""
session = get_session()
if inactive:
inst_types = session.query(models.InstanceTypes).\
order_by("name").\
all()
else:
inst_types = session.query(models.InstanceTypes).\
filter_by(deleted=False).\
order_by("name").\
all()
if inst_types:
inst_dict = {}
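        # The returned dict maps each instance_type name to its full row,
        # roughly: {'m1.tiny': {'name': 'm1.tiny', 'memory_mb': 512, ...}, ...}
        # (the exact keys depend on the InstanceTypes model definition).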
for i in inst_types:
inst_dict[i['name']] = dict(i)
return inst_dict
else:
raise exception.NoInstanceTypesFound()
@require_context
def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
filter_by(id=id).\
first()
if not inst_type:
raise exception.InstanceTypeNotFound(instance_type=id)
else:
return dict(inst_type)
@require_context
def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
filter_by(name=name).\
first()
if not inst_type:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return dict(inst_type)
@require_context
def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
filter_by(flavorid=int(id)).\
first()
if not inst_type:
raise exception.FlavorNotFound(flavor_id=id)
else:
return dict(inst_type)
@require_admin_context
def instance_type_destroy(context, name):
    """Marks the specified instance_type as deleted."""
session = get_session()
instance_type_ref = session.query(models.InstanceTypes).\
filter_by(name=name)
records = instance_type_ref.update(dict(deleted=True))
if records == 0:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
@require_admin_context
def instance_type_purge(context, name):
    """Removes the specified instance_type from the DB.
    Usually instance_type_destroy should be used instead.
    """
session = get_session()
instance_type_ref = session.query(models.InstanceTypes).\
filter_by(name=name)
records = instance_type_ref.delete()
if records == 0:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
####################
@require_admin_context
def zone_create(context, values):
zone = models.Zone()
zone.update(values)
zone.save()
return zone
@require_admin_context
def zone_update(context, zone_id, values):
session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
raise exception.ZoneNotFound(zone_id=zone_id)
zone.update(values)
zone.save()
return zone
@require_admin_context
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
session.query(models.Zone).\
filter_by(id=zone_id).\
delete()
@require_admin_context
def zone_get(context, zone_id):
session = get_session()
result = session.query(models.Zone).filter_by(id=zone_id).first()
if not result:
raise exception.ZoneNotFound(zone_id=zone_id)
return result
@require_admin_context
def zone_get_all(context):
session = get_session()
return session.query(models.Zone).all()
####################
@require_context
def instance_metadata_get(context, instance_id):
session = get_session()
meta_results = session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
meta_dict = {}
for i in meta_results:
meta_dict[i['key']] = i['value']
return meta_dict
@require_context
def instance_metadata_delete(context, instance_id, key):
session = get_session()
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
filter_by(key=key).\
filter_by(deleted=False).\
update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def instance_metadata_get_item(context, instance_id, key, session=None):
    if not session:
        session = get_session()
    meta_result = session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
filter_by(key=key).\
filter_by(deleted=False).\
first()
if not meta_result:
raise exception.InstanceMetadataNotFound(metadata_key=key,
instance_id=instance_id)
return meta_result
@require_context
def instance_metadata_update_or_create(context, instance_id, metadata):
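    # Upsert semantics: reuse the existing non-deleted metadata row for each
    # key when one exists, otherwise create a new row.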
session = get_session()
meta_ref = None
for key, value in metadata.iteritems():
        try:
            meta_ref = instance_metadata_get_item(context, instance_id, key,
                                                  session=session)
        except exception.InstanceMetadataNotFound:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": value,
"instance_id": instance_id,
"deleted": 0})
meta_ref.save(session=session)
return metadata
|
|
"""Test the nifti_region module
Functions in this file only test features added by the NiftiLabelsMasker and
NiftiMapsMasker classes, not the underlying functions (clean(),
img_to_signals_labels(), etc.). See test_masking.py and test_signal.py for details.
"""
from nose.tools import assert_raises, assert_equal, assert_true
import numpy as np
import nibabel
from ..nifti_region import NiftiLabelsMasker, NiftiMapsMasker
from ..._utils import testing
from ..._utils import as_ndarray
def generate_random_img(shape, length=1, affine=np.eye(4),
rand_gen=np.random.RandomState(0)):
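    # Return a random 4D image of the requested shape/length together with a
    # binary mask image obtained by thresholding the first volume at 0.2;
    # both images share the given affine.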
data = rand_gen.randn(*(shape + (length,)))
return nibabel.Nifti1Image(data, affine), nibabel.Nifti1Image(
as_ndarray(data[..., 0] > 0.2, dtype=np.int8), affine)
def test_nifti_labels_masker():
# Check working of shape/affine checks
shape1 = (13, 11, 12)
affine1 = np.eye(4)
shape2 = (12, 10, 14)
affine2 = np.diag((1, 2, 3, 1))
n_regions = 9
length = 3
fmri11_img, mask11_img = generate_random_img(shape1, affine=affine1,
length=length)
fmri12_img, mask12_img = generate_random_img(shape1, affine=affine2,
length=length)
fmri21_img, mask21_img = generate_random_img(shape2, affine=affine1,
length=length)
labels11_img = testing.generate_labeled_regions(shape1, affine=affine1,
n_regions=n_regions)
# No exception raised here
masker11 = NiftiLabelsMasker(labels11_img, resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask11_img,
resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
# Test all kinds of mismatch between shapes and between affines
masker11 = NiftiLabelsMasker(labels11_img, resampling_target=None)
masker11.fit()
assert_raises(ValueError, masker11.transform, fmri12_img)
assert_raises(ValueError, masker11.transform, fmri21_img)
masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask12_img,
resampling_target=None)
assert_raises(ValueError, masker11.fit)
masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask21_img,
resampling_target=None)
assert_raises(ValueError, masker11.fit)
# Transform, with smoothing (smoke test)
masker11 = NiftiLabelsMasker(labels11_img, smoothing_fwhm=3,
resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
masker11 = NiftiLabelsMasker(labels11_img, smoothing_fwhm=3,
resampling_target=None)
signals11 = masker11.fit_transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
# Call inverse transform (smoke test)
fmri11_img_r = masker11.inverse_transform(signals11)
assert_equal(fmri11_img_r.shape, fmri11_img.shape)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
fmri11_img.get_affine())
def test_nifti_labels_masker_2():
# Test resampling in NiftiLabelsMasker
shape1 = (10, 11, 12)
affine = np.eye(4)
# mask
shape2 = (16, 17, 18)
# labels
shape3 = (13, 14, 15)
n_regions = 9
length = 3
fmri11_img, _ = generate_random_img(shape1, affine=affine,
length=length)
_, mask22_img = generate_random_img(shape2, affine=affine,
length=length)
labels33_img = testing.generate_labeled_regions(shape3, n_regions,
affine=affine)
# Test error checking
assert_raises(ValueError, NiftiLabelsMasker, labels33_img,
resampling_target="mask")
assert_raises(ValueError, NiftiLabelsMasker, labels33_img,
resampling_target="invalid")
# Target: labels
masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img,
resampling_target="labels")
masker.fit()
np.testing.assert_almost_equal(masker.labels_img_.get_affine(),
labels33_img.get_affine())
assert_equal(masker.labels_img_.shape, labels33_img.shape)
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
masker.labels_img_.get_affine())
assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3])
transformed = masker.transform(fmri11_img)
assert_equal(transformed.shape, (length, n_regions))
fmri11_img_r = masker.inverse_transform(transformed)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
masker.labels_img_.get_affine())
assert_equal(fmri11_img_r.shape,
(masker.labels_img_.shape[:3] + (length,)))
# Test with clipped labels: mask does not contain all labels.
# Shapes do matter in that case, because there is some resampling
# taking place.
shape1 = (10, 11, 12) # fmri
shape2 = (8, 9, 10) # mask
shape3 = (16, 18, 20) # maps
n_regions = 9
length = 21
fmri11_img, _ = generate_random_img(shape1, affine=affine,
length=length)
_, mask22_img = generate_random_img(shape2, affine=affine,
length=length)
# Target: labels
labels33_img = testing.generate_labeled_regions(shape3, n_regions,
affine=affine)
masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img,
resampling_target="labels")
masker.fit()
np.testing.assert_almost_equal(masker.labels_img_.get_affine(),
labels33_img.get_affine())
assert_equal(masker.labels_img_.shape, labels33_img.shape)
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
masker.labels_img_.get_affine())
assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3])
uniq_labels = np.unique(masker.labels_img_.get_data())
assert_equal(uniq_labels[0], 0)
assert_true(len(uniq_labels) - 1 == n_regions)
transformed = masker.transform(fmri11_img)
assert_equal(transformed.shape, (length, n_regions))
# Some regions have been clipped. Resulting signal must be zero
assert_true((transformed.var(axis=0) == 0).sum() < n_regions)
fmri11_img_r = masker.inverse_transform(transformed)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
masker.labels_img_.get_affine())
assert_equal(fmri11_img_r.shape,
(masker.labels_img_.shape[:3] + (length,)))
def test_nifti_maps_masker():
# Check working of shape/affine checks
shape1 = (13, 11, 12)
affine1 = np.eye(4)
shape2 = (12, 10, 14)
affine2 = np.diag((1, 2, 3, 1))
n_regions = 9
length = 3
fmri11_img, mask11_img = generate_random_img(shape1, affine=affine1,
length=length)
fmri12_img, mask12_img = generate_random_img(shape1, affine=affine2,
length=length)
fmri21_img, mask21_img = generate_random_img(shape2, affine=affine1,
length=length)
labels11_img, labels_mask_img = \
testing.generate_maps(shape1, n_regions, affine=affine1)
# No exception raised here
for create_files in (True, False):
with testing.write_tmp_imgs(labels11_img, create_files=create_files) \
as labels11:
masker11 = NiftiMapsMasker(labels11, resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
masker11 = NiftiMapsMasker(labels11_img, mask_img=mask11_img,
resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
# Test all kinds of mismatches between shapes and between affines
for create_files in (True, False):
with testing.write_tmp_imgs(labels11_img, mask12_img,
create_files=create_files) as images:
labels11, mask12 = images
masker11 = NiftiMapsMasker(labels11, resampling_target=None)
masker11.fit()
assert_raises(ValueError, masker11.transform, fmri12_img)
assert_raises(ValueError, masker11.transform, fmri21_img)
masker11 = NiftiMapsMasker(labels11, mask_img=mask12,
resampling_target=None)
assert_raises(ValueError, masker11.fit)
masker11 = NiftiMapsMasker(labels11_img, mask_img=mask21_img,
resampling_target=None)
assert_raises(ValueError, masker11.fit)
# Transform, with smoothing (smoke test)
masker11 = NiftiMapsMasker(labels11_img, smoothing_fwhm=3,
resampling_target=None)
signals11 = masker11.fit().transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
masker11 = NiftiMapsMasker(labels11_img, smoothing_fwhm=3,
resampling_target=None)
signals11 = masker11.fit_transform(fmri11_img)
assert_equal(signals11.shape, (length, n_regions))
# Call inverse transform (smoke test)
fmri11_img_r = masker11.inverse_transform(signals11)
assert_equal(fmri11_img_r.shape, fmri11_img.shape)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
fmri11_img.get_affine())
def test_nifti_maps_masker_2():
# Test resampling in NiftiMapsMasker
affine = np.eye(4)
shape1 = (10, 11, 12) # fmri
shape2 = (13, 14, 15) # mask
shape3 = (16, 17, 18) # maps
n_regions = 9
length = 3
fmri11_img, _ = generate_random_img(shape1, affine=affine,
length=length)
_, mask22_img = generate_random_img(shape2, affine=affine,
length=length)
maps33_img, _ = \
testing.generate_maps(shape3, n_regions, affine=affine)
# Test error checking
assert_raises(ValueError, NiftiMapsMasker, maps33_img,
resampling_target="mask")
assert_raises(ValueError, NiftiMapsMasker, maps33_img,
resampling_target="invalid")
# Target: mask
masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
resampling_target="mask")
masker.fit()
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
mask22_img.get_affine())
assert_equal(masker.mask_img_.shape, mask22_img.shape)
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
masker.maps_img_.get_affine())
assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
transformed = masker.transform(fmri11_img)
assert_equal(transformed.shape, (length, n_regions))
fmri11_img_r = masker.inverse_transform(transformed)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
masker.maps_img_.get_affine())
assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,)))
# Target: maps
masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
resampling_target="maps")
masker.fit()
np.testing.assert_almost_equal(masker.maps_img_.get_affine(),
maps33_img.get_affine())
assert_equal(masker.maps_img_.shape, maps33_img.shape)
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
masker.maps_img_.get_affine())
assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
transformed = masker.transform(fmri11_img)
assert_equal(transformed.shape, (length, n_regions))
fmri11_img_r = masker.inverse_transform(transformed)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
masker.maps_img_.get_affine())
assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,)))
# Test with clipped maps: mask does not contain all maps.
# Shapes do matter in that case
affine1 = np.eye(4)
shape1 = (10, 11, 12)
shape2 = (8, 9, 10) # mask
affine2 = np.diag((2, 2, 2, 1)) # just for mask
shape3 = (16, 18, 20) # maps
n_regions = 9
length = 21
fmri11_img, _ = generate_random_img(shape1, affine=affine1, length=length)
_, mask22_img = testing.generate_fake_fmri(shape2, length=1,
affine=affine2)
# Target: maps
maps33_img, _ = \
testing.generate_maps(shape3, n_regions, affine=affine1)
masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
resampling_target="maps")
masker.fit()
np.testing.assert_almost_equal(masker.maps_img_.get_affine(),
maps33_img.get_affine())
assert_equal(masker.maps_img_.shape, maps33_img.shape)
np.testing.assert_almost_equal(masker.mask_img_.get_affine(),
masker.maps_img_.get_affine())
assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
transformed = masker.transform(fmri11_img)
assert_equal(transformed.shape, (length, n_regions))
# Some regions have been clipped. Resulting signal must be zero
assert_true((transformed.var(axis=0) == 0).sum() < n_regions)
fmri11_img_r = masker.inverse_transform(transformed)
np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
masker.maps_img_.get_affine())
assert_equal(fmri11_img_r.shape,
(masker.maps_img_.shape[:3] + (length,)))
|
|
"""
Famous social networks.
"""
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <[email protected]>',
'Katy Bold <[email protected]>',
                            'Aric Hagberg <[email protected]>'])
__all__ = ['karate_club_graph', 'davis_southern_women_graph',
'florentine_families_graph']
def karate_club_graph():
"""Return Zachary's Karate Club graph.
Each node in the returned graph has a node attribute ``'club'`` that
indicates the name of the club to which the member represented by that node
belongs, either ``'Mr. Hi'`` or ``'Officer'``.
Examples
--------
To get the name of the club to which a node belongs::
>>> import networkx as nx
>>> G = nx.karate_club_graph()
>>> G.node[5]['club']
'Mr. Hi'
>>> G.node[9]['club']
'Officer'
References
----------
.. [1] Zachary, Wayne W.
"An Information Flow Model for Conflict and Fission in Small Groups."
*Journal of Anthropological Research*, 33, 452--473, (1977).
.. [2] Data file from:
http://vlado.fmf.uni-lj.si/pub/networks/data/Ucinet/UciData.htm
"""
# Create the set of all members, and the members of each club.
all_members = set(range(34))
club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}
# club2 = all_members - club1
G = nx.Graph()
G.add_nodes_from(all_members)
G.name = "Zachary's Karate Club"
zacharydat = """\
0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0
1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0
1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0
1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1
0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1
0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1
0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0"""
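    # Parse the whitespace-separated adjacency matrix above. The matrix is
    # symmetric, so every edge is seen twice, but adding an already existing
    # edge to a Graph is a no-op, so no duplicates result.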
for row, line in enumerate(zacharydat.split('\n')):
thisrow = [int(b) for b in line.split()]
for col, entry in enumerate(thisrow):
if entry == 1:
G.add_edge(row, col)
# Add the name of each member's club as a node attribute.
for v in G:
G.node[v]['club'] = 'Mr. Hi' if v in club1 else 'Officer'
return G
def davis_southern_women_graph():
"""Return Davis Southern women social network.
This is a bipartite graph.
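    Examples
    --------
    A small usage sketch; the counts follow from the edge list defined in
    this function, and the women/events are also stored in
    ``G.graph['top']`` and ``G.graph['bottom']``::
    >>> import networkx as nx
    >>> G = nx.davis_southern_women_graph()
    >>> len(G)
    32
    >>> G.number_of_edges()
    89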
References
----------
.. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South.
University of Chicago Press, Chicago, IL.
"""
G = nx.Graph()
# Top nodes
women = ["Evelyn Jefferson",
"Laura Mandeville",
"Theresa Anderson",
"Brenda Rogers",
"Charlotte McDowd",
"Frances Anderson",
"Eleanor Nye",
"Pearl Oglethorpe",
"Ruth DeSand",
"Verne Sanderson",
"Myra Liddel",
"Katherina Rogers",
"Sylvia Avondale",
"Nora Fayette",
"Helen Lloyd",
"Dorothy Murchison",
"Olivia Carleton",
"Flora Price"]
G.add_nodes_from(women, bipartite=0)
# Bottom nodes
events = ["E1",
"E2",
"E3",
"E4",
"E5",
"E6",
"E7",
"E8",
"E9",
"E10",
"E11",
"E12",
"E13",
"E14"]
G.add_nodes_from(events, bipartite=1)
G.add_edges_from([("Evelyn Jefferson","E1"),
("Evelyn Jefferson","E2"),
("Evelyn Jefferson","E3"),
("Evelyn Jefferson","E4"),
("Evelyn Jefferson","E5"),
("Evelyn Jefferson","E6"),
("Evelyn Jefferson","E8"),
("Evelyn Jefferson","E9"),
("Laura Mandeville","E1"),
("Laura Mandeville","E2"),
("Laura Mandeville","E3"),
("Laura Mandeville","E5"),
("Laura Mandeville","E6"),
("Laura Mandeville","E7"),
("Laura Mandeville","E8"),
("Theresa Anderson","E2"),
("Theresa Anderson","E3"),
("Theresa Anderson","E4"),
("Theresa Anderson","E5"),
("Theresa Anderson","E6"),
("Theresa Anderson","E7"),
("Theresa Anderson","E8"),
("Theresa Anderson","E9"),
("Brenda Rogers","E1"),
("Brenda Rogers","E3"),
("Brenda Rogers","E4"),
("Brenda Rogers","E5"),
("Brenda Rogers","E6"),
("Brenda Rogers","E7"),
("Brenda Rogers","E8"),
("Charlotte McDowd","E3"),
("Charlotte McDowd","E4"),
("Charlotte McDowd","E5"),
("Charlotte McDowd","E7"),
("Frances Anderson","E3"),
("Frances Anderson","E5"),
("Frances Anderson","E6"),
("Frances Anderson","E8"),
("Eleanor Nye","E5"),
("Eleanor Nye","E6"),
("Eleanor Nye","E7"),
("Eleanor Nye","E8"),
("Pearl Oglethorpe","E6"),
("Pearl Oglethorpe","E8"),
("Pearl Oglethorpe","E9"),
("Ruth DeSand","E5"),
("Ruth DeSand","E7"),
("Ruth DeSand","E8"),
("Ruth DeSand","E9"),
("Verne Sanderson","E7"),
("Verne Sanderson","E8"),
("Verne Sanderson","E9"),
("Verne Sanderson","E12"),
("Myra Liddel","E8"),
("Myra Liddel","E9"),
("Myra Liddel","E10"),
("Myra Liddel","E12"),
("Katherina Rogers","E8"),
("Katherina Rogers","E9"),
("Katherina Rogers","E10"),
("Katherina Rogers","E12"),
("Katherina Rogers","E13"),
("Katherina Rogers","E14"),
("Sylvia Avondale","E7"),
("Sylvia Avondale","E8"),
("Sylvia Avondale","E9"),
("Sylvia Avondale","E10"),
("Sylvia Avondale","E12"),
("Sylvia Avondale","E13"),
("Sylvia Avondale","E14"),
("Nora Fayette","E6"),
("Nora Fayette","E7"),
("Nora Fayette","E9"),
("Nora Fayette","E10"),
("Nora Fayette","E11"),
("Nora Fayette","E12"),
("Nora Fayette","E13"),
("Nora Fayette","E14"),
("Helen Lloyd","E7"),
("Helen Lloyd","E8"),
("Helen Lloyd","E10"),
("Helen Lloyd","E11"),
("Helen Lloyd","E12"),
("Dorothy Murchison","E8"),
("Dorothy Murchison","E9"),
("Olivia Carleton","E9"),
("Olivia Carleton","E11"),
("Flora Price","E9"),
("Flora Price","E11")])
G.graph['top'] = women
G.graph['bottom'] = events
return G
def florentine_families_graph():
"""Return Florentine families graph.
References
----------
    .. [1] Ronald L. Breiger and Philippa E. Pattison,
       Cumulated social roles: The duality of persons and their algebras,
       Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256.
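    Examples
    --------
    A small usage sketch; the node and edge counts follow from the edges
    added below::
    >>> import networkx as nx
    >>> G = nx.florentine_families_graph()
    >>> len(G)
    15
    >>> G.number_of_edges()
    20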
"""
    G = nx.Graph()
G.add_edge('Acciaiuoli','Medici')
G.add_edge('Castellani','Peruzzi')
G.add_edge('Castellani','Strozzi')
G.add_edge('Castellani','Barbadori')
G.add_edge('Medici','Barbadori')
G.add_edge('Medici','Ridolfi')
G.add_edge('Medici','Tornabuoni')
G.add_edge('Medici','Albizzi')
G.add_edge('Medici','Salviati')
G.add_edge('Salviati','Pazzi')
G.add_edge('Peruzzi','Strozzi')
G.add_edge('Peruzzi','Bischeri')
G.add_edge('Strozzi','Ridolfi')
G.add_edge('Strozzi','Bischeri')
G.add_edge('Ridolfi','Tornabuoni')
G.add_edge('Tornabuoni','Guadagni')
G.add_edge('Albizzi','Ginori')
G.add_edge('Albizzi','Guadagni')
G.add_edge('Bischeri','Guadagni')
G.add_edge('Guadagni','Lamberteschi')
return G
|
|
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
from generator import generator, generate
from nose.plugins.attrib import attr
import cloudferry_devlab.tests.base as base
import cloudferry_devlab.tests.config as config
from cloudferry_devlab.tests import functional_test
@generator
class CinderMigrationTests(functional_test.FunctionalTest):
    """Test case class which includes cinder volumes migration cases."""
def setUp(self):
super(CinderMigrationTests, self).setUp()
self.src_volume_list = self.filter_volumes()
self.dst_volume_list = self.dst_cloud.cinderclient.volumes.list(
search_opts={'all_tenants': 1})
@attr(migrated_tenant='tenant2')
@generate('display_name', 'size', 'bootable', 'metadata',
'volume_image_metadata')
    def test_migrate_cinder_volumes(self, param):
        """Validate volumes were migrated with correct parameters.
        :param display_name:
        :param size:
        :param bootable:
        :param metadata:
        :param volume_image_metadata:"""
def ignore_default_metadata(volumes):
default_keys = ('readonly', 'attached_mode', 'src_volume_id')
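            # These keys are added automatically at attach/migration time
            # (presumably by cinder and the migration tooling), so they are
            # stripped before the user-defined metadata is compared.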
for vol in volumes:
for default_key in default_keys:
if default_key in vol.metadata:
del vol.metadata[default_key]
return volumes
src_volume_list = ignore_default_metadata(self.src_volume_list)
dst_volume_list = ignore_default_metadata(self.dst_volume_list)
if param == 'volume_image_metadata':
def ignore_image_id(volumes):
for vol in volumes:
metadata = getattr(vol, 'volume_image_metadata', None)
if metadata and 'image_id' in metadata:
del metadata['image_id']
vol.volume_image_metadata = metadata
return volumes
src_volume_list = ignore_image_id(src_volume_list)
dst_volume_list = ignore_image_id(dst_volume_list)
self.validate_resource_parameter_in_dst(
src_volume_list, dst_volume_list, resource_name='volume',
parameter=param)
@attr(migrated_tenant='tenant2')
def test_migrate_cinder_volumes_data(self):
"""Validate volume data was migrated correctly.
Scenario:
1. Get volumes on which data was written
2. Get floating ip address of vm, to which volume attached
3. Open TCP/22 port for vm's tenant,
4. Wait until vm accessible via ssh
5. Check mount point has been migrated with ephemeral storage
6. Mount volume
7. Check data on volume is correct
"""
def check_file_valid(filename):
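            # Compare the file's md5 after migration with the checksum stored
            # next to it (in '<filename>_md5') before migration.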
get_md5_cmd = 'md5sum %s' % filename
get_old_md5_cmd = 'cat %s_md5' % filename
md5sum = self.migration_utils.execute_command_on_vm(
vm_ip, get_md5_cmd).split()[0]
old_md5sum = self.migration_utils.execute_command_on_vm(
vm_ip, get_old_md5_cmd).split()[0]
if md5sum != old_md5sum:
msg = "MD5 of file %s before and after migrate is different"
raise RuntimeError(msg % filename)
        def check_mount_point_exists(ip, vol):
            """Check that the directory which will be used as the volume's
            mount point exists on the vm's ephemeral storage.
:param ip: vm's ip address, where mount point should be checked
:param vol: dict with volume's parameters from tests/config.py
"""
command = '[ -d %s ]' % vol['mount_point']
try:
self.migration_utils.execute_command_on_vm(ip, command)
except SystemExit:
                msg = ('Mount point for volume "{vol_name}" not found. Check '
                       'that directory "{mp}" exists on the vm named '
                       '"{vm_name}". If it does not, check that ephemeral '
                       'storage migration works properly.')
self.fail(msg.format(vol_name=vol['display_name'],
mp=vol['mount_point'], vm_name=vm.name))
volumes = config.cinder_volumes
volumes += itertools.chain(*[tenant['cinder_volumes'] for tenant
in config.tenants if 'cinder_volumes'
in tenant])
for volume in volumes:
attached_volume = volume.get('server_to_attach')
if not volume.get('write_to_file') or not attached_volume:
continue
vm = self.dst_cloud.novaclient.servers.get(
self.dst_cloud.get_vm_id(volume['server_to_attach']))
vm_ip = self.migration_utils.get_vm_fip(vm)
self.migration_utils.open_ssh_port_secgroup(self.dst_cloud,
vm.tenant_id)
base.BasePrerequisites.wait_until_objects(
[(vm_ip, 'pwd')],
self.migration_utils.wait_until_vm_accessible_via_ssh,
config.TIMEOUT)
check_mount_point_exists(vm_ip, volume)
cmd = 'mount {0} {1}'.format(volume['device'],
volume['mount_point'])
self.migration_utils.execute_command_on_vm(vm_ip, cmd,
warn_only=True)
for _file in volume['write_to_file']:
check_file_valid(volume['mount_point'] + _file['filename'])
def test_cinder_volumes_deleted_not_attached_to_vm(self):
"""Validate volumes that were deleted from cinder DB are not attached
to migrated vms on destination cloud.
"""
deleted_volumes_migrated = []
vol_name_list = self.dst_cloud.get_attached_server_volumes()
for vol in vol_name_list:
if vol in config.volumes_deleted_by_name_from_db:
deleted_volumes_migrated.append(vol)
if deleted_volumes_migrated:
            self.fail(msg='Volumes were migrated even though they were '
                          'deleted from the cinder DB. Volume info: \n{}'.format(
                           deleted_volumes_migrated))
def test_cinder_volumes_not_in_filter_did_not_migrate(self):
"""Validate volumes not in filter weren't migrated."""
dst_volumes = [x.id for x in self.dst_volume_list]
filtering_data = self.filtering_utils.filter_volumes(
self.src_volume_list)
volumes_filtered_out = filtering_data[1]
volumes_not_in_filter = []
for volume in volumes_filtered_out:
if volume.id in dst_volumes:
volumes_not_in_filter.append(volume)
if volumes_not_in_filter:
            self.fail(msg='Volumes were migrated even though they were not '
                          'included in the filter. Volumes info: \n{}'.format(
                           volumes_not_in_filter))
def test_cinder_volumes_deleted_from_db_did_not_migrate(self):
"""Validate volumes that were deleted from cinder DB
weren't migrated.
"""
dst_volumes = [self.dst_cloud.get_volume_name(x.id)
for x in self.dst_volume_list]
deleted_volumes_migrated = []
for volume in config.volumes_deleted_by_name_from_db:
if volume in dst_volumes:
deleted_volumes_migrated.append(volume)
if deleted_volumes_migrated:
            self.fail(msg='Volumes were migrated even though they were '
                          'deleted from the cinder DB. Volume info: \n{}'.format(
                           deleted_volumes_migrated))
def test_invalid_status_cinder_volumes_did_not_migrate(self):
"""Validate volumes with invalid statuses weren't migrated.
Statuses described in :mod:`config.py`
"""
src_volume_list = self.src_cloud.cinderclient.volumes.list(
search_opts={'all_tenants': 1})
dst_volumes = [x.id for x in self.dst_volume_list]
invalid_status_volumes = [
vol for vol in src_volume_list
if vol.status in config.INVALID_STATUSES
]
invalid_volumes_migrated = []
for volume in invalid_status_volumes:
if volume.id in dst_volumes:
invalid_volumes_migrated.append(volume)
if invalid_volumes_migrated:
            self.fail(msg='Volumes were migrated even though they had an '
                          'invalid status. Volume info: \n{}'.format(
                           invalid_volumes_migrated))
@generate('display_name', 'size')
    @unittest.skip("Temporarily disabled: snapshots aren't implemented in "
                   "cinder's nfs driver")
    def test_migrate_cinder_snapshots(self, param):
        """Validate volume snapshots were migrated with correct parameters.
        :param display_name:
        :param size:"""
dst_volume_list = self.dst_cloud.cinderclient.volume_snapshots.list(
search_opts={'all_tenants': 1})
self.validate_resource_parameter_in_dst(
self.src_volume_list, dst_volume_list, resource_name='volume',
parameter=param)
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.stats import kruskal
from skbio.stats.power import (subsample_power,
subsample_paired_power,
_check_nans,
confidence_bound,
_calculate_power,
_compare_distributions,
_calculate_power_curve,
_check_subsample_power_inputs,
_identify_sample_groups,
_draw_paired_samples,
_get_min_size,
bootstrap_power_curve,
paired_subsamples
)
class PowerAnalysisTest(TestCase):
def setUp(self):
        # Defines testing functions
        def test_meta(ids, meta, cat, div):
            """Checks the div metric with a Kruskal-Wallis test"""
out = [meta.loc[id_, div] for id_ in ids]
return kruskal(*out)[1]
def meta_f(x):
"""Applies `test_meta` to a result"""
return test_meta(x, self.meta, 'INT', 'DIV')
def f(x):
"""returns the p value of a kruskal wallis test"""
return kruskal(*x)[1]
self.test_meta = test_meta
self.f = f
self.meta_f = meta_f
self.num_p = 1
# Sets the random seed
np.random.seed(5)
# Sets up the distributions of data for use
self.s1 = np.arange(0, 10, 1)
# Sets up two distributions which will never be equal by a rank-sum
# test.
self.samps = [np.ones((10))/10., np.ones((10))]
self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
# Sets up a vector of alpha values
self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
# Sets up a vector of samples
self.num_samps = np.arange(10, 100, 10)
# Sets up a mapping file
meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
'SEX': 'M'},
'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s',
'SEX': 'M'},
'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s',
'SEX': 'F'},
'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s',
'SEX': 'F'},
'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s',
'SEX': 'F'},
'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s',
'SEX': 'M'},
'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s',
'SEX': 'F'},
'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s',
'SEX': 'F'},
'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s',
'SEX': 'M'},
'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s',
'SEX': 'M'},
'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s',
'SEX': 'M'},
'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
'SEX': 'F'}}
self.meta = pd.DataFrame.from_dict(meta, orient='index')
self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']],
1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]}
self.pair_index = np.array([0, 0, 0, 1, 1, 1])
self.counts = np.array([5, 15, 25, 35, 45])
self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
[0.115, 0.135, 0.196, 0.204, 0.281],
[0.096, 0.170, 0.165, 0.232, 0.256],
[0.122, 0.157, 0.202, 0.250, 0.279],
[0.132, 0.135, 0.173, 0.203, 0.279]]),
np.array([[0.157, 0.345, 0.522, 0.639, 0.739],
[0.159, 0.374, 0.519, 0.646, 0.757],
[0.161, 0.339, 0.532, 0.634, 0.745],
[0.169, 0.372, 0.541, 0.646, 0.762],
[0.163, 0.371, 0.522, 0.648, 0.746]]),
np.array([[0.276, 0.626, 0.865, 0.927, 0.992],
[0.267, 0.667, 0.848, 0.937, 0.978],
[0.236, 0.642, 0.850, 0.935, 0.977],
[0.249, 0.633, 0.828, 0.955, 0.986],
[0.249, 0.663, 0.869, 0.951, 0.985]])]
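        # self.powers appears to hold pre-computed empirical power matrices,
        # one per effect size in self.effects, evaluated at the sample counts
        # in self.counts.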
self.power_alpha = 0.1
self.effects = np.array([0.15245, 0.34877, 0.55830])
self.bounds = np.array([0.01049, 0.00299, 0.007492])
        self.labels = np.array(['Age', 'Intervention', 'Antibiotics'])
self.cats = np.array(['AGE', 'INT', 'ABX'])
self.cat = "AGE"
self.control_cats = ['INT', 'ABX']
def test_subsample_power_defaults(self):
test_p, test_c = subsample_power(self.f, self.pop,
num_iter=10, num_runs=5)
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_counts(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=2,
min_counts=5)
self.assertEqual(test_p.shape, (2, 5))
npt.assert_array_equal(np.arange(5, 50, 10), test_c)
def test_subsample_power_matches(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=5,
draw_mode="matched")
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_multi_p(self):
test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]),
samples=self.pop,
num_iter=10,
num_runs=5)
self.assertEqual(test_p.shape, (5, 4, 2))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_paired_power(self):
known_c = np.array([1, 2, 3, 4])
# Sets up the handling values
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(self.meta_f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
# Test the output shapes are sane
self.assertEqual(test_p.shape, (2, 4))
npt.assert_array_equal(known_c, test_c)
def test_subsample_paired_power_multi_p(self):
def f(x):
return np.array([0.5, 0.5, 0.005])
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
self.assertEqual(test_p.shape, (2, 4, 3))
def test_check_nans_str(self):
self.assertTrue(_check_nans('string'))
def test_check_nans_num(self):
self.assertTrue(_check_nans(4.2))
def test__check_nans_nan(self):
self.assertFalse(_check_nans(np.nan))
def test__check_nans_clean_list(self):
self.assertTrue(_check_nans(['foo', 'bar'], switch=True))
def test__check_nans_list_nan(self):
self.assertFalse(_check_nans(['foo', np.nan], switch=True))
def test__check_str_error(self):
with self.assertRaises(TypeError):
_check_nans(self.f)
def test__get_min_size_strict(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
True)
self.assertEqual(test, known)
def test__get_min_size_relaxed(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
False)
self.assertEqual(known, test)
def test_confidence_bound_default(self):
        # Sets the known confidence bound
known = 2.2830070
test = confidence_bound(self.s1)
npt.assert_almost_equal(test, known, 3)
def test_confidence_bound_df(self):
known = 2.15109
test = confidence_bound(self.s1, df=15)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_alpha(self):
known = 3.2797886
test = confidence_bound(self.s1, alpha=0.01)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_nan(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
        # Sets the known value
known = np.array([2.2284, 0.2573, 0.08573])
# Tests the function
test = confidence_bound(samples, axis=0)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_axis_none(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
        # Sets the known value
known = 0.52852
# Tests the output
test = confidence_bound(samples, axis=None)
npt.assert_almost_equal(known, test, 3)
def test__calculate_power(self):
# Sets up the values to test
crit = 0.025
# Sets the known value
known = 0.5
# Calculates the test value
test = _calculate_power(self.alpha, crit)
# Checks the test value
npt.assert_almost_equal(known, test)
def test__calculate_power_n(self):
crit = 0.025
known = np.array([0.5, 0.5])
alpha = np.vstack((self.alpha, self.alpha))
test = _calculate_power(alpha, crit)
npt.assert_almost_equal(known, test)
def test__compare_distributions_sample_counts_error(self):
with self.assertRaises(ValueError):
_compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1,
counts=25)
def test__compare_distributions_all_mode(self):
known = np.ones((100))*0.0026998
test = _compare_distributions(self.f, self.samps, 1, num_iter=100)
npt.assert_allclose(known, test, 5)
def test__compare_distributions_matched_mode(self):
# Sets the known value
known_mean = 0.162195
known_std = 0.121887
known_shape = (100,)
# Tests the sample value
test = _compare_distributions(self.f, self.pop, self.num_p,
mode='matched', num_iter=100)
npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
self.assertEqual(known_shape, test.shape)
def test__compare_distributions_draw_mode(self):
draw_mode = 'Ultron'
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f, self.pop, draw_mode,
self.num_p)
def test__compare_distributions_multiple_returns(self):
known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
def f(x):
return np.array([1, 2, 3])
test = _compare_distributions(f, self.pop, 3, mode='matched',
num_iter=3)
npt.assert_array_equal(known, test)
def test_check_subsample_power_inputs_matched_mode(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((2)), np.ones((5))],
draw_mode="matched")
def test_check_subsample_power_inputs_counts(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((3)), np.ones((5))],
min_counts=5,
counts_interval=1000,
max_counts=7)
def test_check_subsample_power_inputs_ratio(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
self.samps,
ratio=np.array([1, 2, 3]))
def test_check_subsample_power_inputs_test(self):
# Defines a test function
def test(x):
return 'Hello World!'
with self.assertRaises(TypeError):
_check_subsample_power_inputs(test, self.samps)
def test_check_sample_power_inputs(self):
        # Defines the known returns
known_num_p = 1
known_ratio = np.ones((2))
known_counts = np.arange(2, 10, 2)
# Runs the code for the returns
test_ratio, test_num_p, test_counts = \
_check_subsample_power_inputs(self.f,
self.samps,
counts_interval=2,
max_counts=10)
# Checks the returns are sane
self.assertEqual(known_num_p, test_num_p)
npt.assert_array_equal(known_ratio, test_ratio)
npt.assert_array_equal(known_counts, test_counts)
def test__calculate_power_curve_ratio_error(self):
with self.assertRaises(ValueError):
_calculate_power_curve(self.f, self.pop, self.num_samps,
ratio=np.array([0.1, 0.2, 0.3]),
num_iter=100)
def test__calculate_power_curve_default(self):
# Sets the known output
known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.01)
def test__calculate_power_curve_alpha(self):
        # Sets the known output
known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
alpha=0.01,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test__calculate_power_curve_ratio(self):
        # Sets the known output
known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969,
0.996, 0.998])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
ratio=np.array([0.25, 0.75]),
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test_bootstrap_power_curve(self):
# Sets the known values
known_mean = np.array([0.500, 0.82, 0.965, 0.995, 1.000, 1.000,
1.000, 1.000, 1.000])
known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
0.00])
# Generates the test values
test_mean, test_bound = bootstrap_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the function returned sanely
npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
def test_paired_subsamples_default(self):
# Sets the known np.array set
known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
{'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}]
# Gets the test value
cat = 'INT'
control_cats = ['SEX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats)
self.assertEqual(known_array[0], set(test_array[0]))
self.assertEqual(known_array[1], set(test_array[1]))
def test_paired_subsamples_break(self):
# Sets known np.array set
known_array = [np.array([]), np.array([])]
# Gets the test value
cat = 'ABX'
control_cats = ['SEX', 'AGE', 'INT']
test_array = paired_subsamples(self.meta, cat, control_cats)
npt.assert_array_equal(known_array, test_array)
def test_paired_subsample_undefined(self):
known_array = np.zeros((2, 0))
cat = 'INT'
order = ['Y', 'N']
control_cats = ['AGE', 'ABX', 'SEX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
npt.assert_array_equal(test_array, known_array)
def test_paired_subsample_fewer(self):
# Set known value
known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'}
# Sets up test values
cat = 'AGE'
order = ['30s', '40s']
control_cats = ['ABX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
for v in test_array[0]:
self.assertTrue(v in known_array)
for v in test_array[1]:
self.assertTrue(v in known_array)
def test_paired_subsamples_not_strict(self):
known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'},
{'LF', 'PC', 'CB', 'NR', 'CD'}]
# Gets the test values
cat = 'INT'
control_cats = ['ABX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats,
strict_match=False)
self.assertEqual(set(test_array[0]), known_array[0])
self.assertEqual(set(test_array[1]), known_array[1])
def test__identify_sample_groups(self):
        # Defines the known values
known_pairs = {0: [['MM'], ['CD']],
1: [['SR'], ['LF']],
2: [['TS'], ['PC']],
3: [['GW'], ['CB']],
4: [['PP'], ['MH']],
5: [['WM'], ['NR']]}
known_index = np.array([0, 1, 2, 3, 4, 5])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'AGE'],
order=['N', 'Y'],
strict_match=True)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
self.assertEqual(sorted(known_pairs.values()),
sorted(test_pairs.values()))
npt.assert_array_equal(known_index, test_index)
def test__identify_sample_groups_not_strict(self):
        # Defines the known values
known_pairs = {0: [['PP'], ['CD', 'NR']],
1: [['MM', 'WM'], ['MH']],
2: [['GW'], ['CB']]}
known_index = np.array([0, 1, 2])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'ABX'],
order=['N', 'Y'],
strict_match=False)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
self.assertEqual(sorted(known_pairs.values()),
sorted(test_pairs.values()))
npt.assert_array_equal(known_index, test_index)
def test__draw_paired_samples(self):
num_samps = 3
known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'},
{'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}]
test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index,
num_samps)
for i, t in enumerate(test_samps):
self.assertTrue(set(t).issubset(known_sets[i]))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import json
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3 import FS, S3CustomController
from s3theme import formstyle_foundation_inline
THEME = "EVASS"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
request = current.request
s3 = current.response.s3
# Check logged in and permissions
auth = current.auth
settings = current.deployment_settings
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
# Login/Registration forms
self_registration = current.deployment_settings.get_security_registration_visible()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
            if "registered" in request.cookies:
# This browser has logged-in before
registered = True
if self_registration is True:
# Provide a Registration box on front page
register_form = auth.register()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
if login_form.errors:
hide, show = "#register_form", "#login_form"
else:
hide, show = "#login_form", "#register_form"
post_script = \
'''$('%s').addClass('hide')
$('%s').removeClass('hide')''' % (hide, show)
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# s3.js_global.append(feed_control)
# Provide a login box on front page
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
else:
output["event_list"] = self.event_list()
output["shelter_list"] = self.shelter_list()
output["events_btn"] = self.events_btn()
output["pr_btn"] = self.pr_btn()
output["staff_btn"] = self.staff_btn()
output["volunteers_btn"] = self.volunteers_btn()
output["evacuees_btn"] = self.evacuees_btn()
output["shelters_btn"] = self.shelters_btn()
output["self_registration"] = self_registration
output["registered"] = registered
output["login_div"] = login_div
output["login_form"] = login_form
output["register_div"] = register_div
output["register_form"] = register_form
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:3,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
# Slick slider
if s3.debug:
s3.scripts.append("/%s/static/scripts/slick.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/slick.min.js" % request.application)
script = '''
$(document).ready(function(){
$('#title-image').slick({
autoplay:true,
autoplaySpeed:5000,
speed:1000,
fade:true,
cssEase:'linear',
adaptiveHeight:true
});
});'''
s3.jquery_ready.append(script)
self._view(THEME, "index.html")
return output
# -------------------------------------------------------------------------
def shelter_list(self):
""" Provide a dropdown of links to shelters """
T = current.T
s3db = current.s3db
resource = s3db.resource("cr_shelter",
filter = FS("status")
.belongs([2, None]))
data = resource.select(["id", "name"])
shelter_list = UL(_id = "shelter_list",
_class = "f-dropdown",
data = {"dropdown-content": ""})
rows = data["rows"]
if rows:
for row in rows:
shelter_list.append(LI(A(row["cr_shelter.name"],
_href=URL(c="cr",
f="shelter",
args=[row["cr_shelter.id"]])
)
)
)
return LI(A(T("Shelters"),
_class="button dropdown",
data = {"dropdown": "shelter_list"}),
shelter_list
)
else:
# @todo: check permission and provide an "Add Shelter" button
            #        if no shelters are registered yet
return ""
# -------------------------------------------------------------------------
def event_list(self):
""" Provide a dropdown of links to events """
T = current.T
s3db = current.s3db
resource = s3db.resource("event_event")
data = resource.select(["id", "name"])
event_list = UL(_id = "event_list",
_class = "f-dropdown",
data = {"dropdown-content": ""})
rows = data["rows"]
if rows:
for row in rows:
event_list.append(LI(A(row["event_event.name"],
_href=URL(c="event",
f="event",
args=[row["event_event.id"]])
)
)
)
return LI(A(T("Events"),
_class="button dropdown",
data = {"dropdown": "event_list"}),
event_list
)
else:
# @todo: check permission and provide an "Add Event" button
            #        if no events are registered yet?
return ""
# -------------------------------------------------------------------------
def events_btn(self):
T = current.T
return LI(A(T("Events"),
_href=URL(c="event", f="event"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def pr_btn(self):
T = current.T
return LI(A(T("Person Registry"),
_href=URL(c="pr", f="index"),
_class="button button-home",
_id="incident-report-btn")
)
# -------------------------------------------------------------------------
def staff_btn(self):
T = current.T
return LI(A(T("Staff"),
_href=URL(c="hrm", f="staff", args=["summary"]),
_class="button button-home")
)
# -------------------------------------------------------------------------
def volunteers_btn(self):
T = current.T
return LI(A(T("Volunteers"),
_href=URL(c="vol", f="volunteer"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def evacuees_btn(self):
T = current.T
return LI(A(T("Evacuees"),
_href=URL(c="evr", f="person"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def shelters_btn(self):
T = current.T
return LI(A(T("Shelters"),
_href=URL(c="cr", f="shelter"),
_class="button button-home")
)
# END =========================================================================
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import logging
import pprint
import time
import requests
from django.conf import settings
from django.forms.models import fields_for_model
from django.http import QueryDict
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.http import urlencode
from paypal.pro.signals import payment_was_successful, recurring_cancel, recurring_suspend, recurring_reactivate, payment_profile_created
from paypal.pro.models import PayPalNVP
from paypal.pro.exceptions import PayPalFailure
USER = settings.PAYPAL_WPP_USER
PASSWORD = settings.PAYPAL_WPP_PASSWORD
SIGNATURE = settings.PAYPAL_WPP_SIGNATURE
VERSION = 116.0
BASE_PARAMS = dict(USER=USER, PWD=PASSWORD, SIGNATURE=SIGNATURE, VERSION=VERSION)
ENDPOINT = "https://api-3t.paypal.com/nvp"
SANDBOX_ENDPOINT = "https://api-3t.sandbox.paypal.com/nvp"
EXPRESS_ENDPOINT = "https://www.paypal.com/webscr?cmd=_express-checkout&%s"
SANDBOX_EXPRESS_ENDPOINT = "https://www.sandbox.paypal.com/webscr?cmd=_express-checkout&%s"
log = logging.getLogger(__file__)
def paypal_time(time_obj=None):
"""Returns a time suitable for PayPal time fields."""
if time_obj is None:
time_obj = time.gmtime()
return time.strftime(PayPalNVP.TIMESTAMP_FORMAT, time_obj)
def paypaltime2datetime(s):
"""Convert a PayPal time string to a DateTime."""
naive = datetime.datetime.strptime(s, PayPalNVP.TIMESTAMP_FORMAT)
if not settings.USE_TZ:
return naive
else:
# TIMESTAMP_FORMAT is UTC
return timezone.make_aware(naive, timezone.UTC())
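# Illustrative sketch (not part of the original module): round-tripping a PayPal
# timestamp with the two helpers above. paypal_time() formats the current GMT
# time; paypaltime2datetime() parses it back, honouring settings.USE_TZ.
def _example_timestamp_roundtrip():
    stamp = paypal_time()
    return paypaltime2datetime(stamp)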
class PayPalError(TypeError):
"""Error thrown when something is wrong."""
def express_endpoint():
if getattr(settings, 'PAYPAL_TEST', True):
return SANDBOX_EXPRESS_ENDPOINT
else:
return EXPRESS_ENDPOINT
def express_endpoint_for_token(token, commit=False):
"""
Returns the PayPal Express Checkout endpoint for a token.
Pass 'commit=True' if you will not prompt for confirmation when the user
returns to your site.
"""
pp_params = dict(token=token)
if commit:
pp_params['useraction'] = 'commit'
return express_endpoint() % urlencode(pp_params)
class PayPalWPP(object):
"""
Wrapper class for the PayPal Website Payments Pro.
Website Payments Pro Integration Guide:
https://cms.paypal.com/cms_content/US/en_US/files/developer/PP_WPP_IntegrationGuide.pdf
Name-Value Pair API Developer Guide and Reference:
https://cms.paypal.com/cms_content/US/en_US/files/developer/PP_NVPAPI_DeveloperGuide.pdf
"""
def __init__(self, request=None, params=BASE_PARAMS):
"""Required - USER / PWD / SIGNATURE / VERSION"""
self.request = request
if getattr(settings, 'PAYPAL_TEST', True):
self.endpoint = SANDBOX_ENDPOINT
else:
self.endpoint = ENDPOINT
self.signature_values = params
self.signature = urlencode(self.signature_values) + "&"
@cached_property
def NVP_FIELDS(self):
# Put this onto class and load lazily, because in some cases there is an
# import order problem if we put it at module level.
return list(fields_for_model(PayPalNVP).keys())
def doDirectPayment(self, params):
"""Call PayPal DoDirectPayment method."""
defaults = {"method": "DoDirectPayment", "paymentaction": "Sale"}
required = ["creditcardtype",
"acct",
"expdate",
"cvv2",
"ipaddress",
"firstname",
"lastname",
"street",
"city",
"state",
"countrycode",
"zip",
"amt",
]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
payment_was_successful.send(sender=nvp_obj, **params)
# @@@ Could check cvv2match / avscode are both 'X' or '0'
# qd = django.http.QueryDict(nvp_obj.response)
# if qd.get('cvv2match') not in ['X', '0']:
# nvp_obj.set_flag("Invalid cvv2match: %s" % qd.get('cvv2match')
# if qd.get('avscode') not in ['X', '0']:
# nvp_obj.set_flag("Invalid avscode: %s" % qd.get('avscode')
return nvp_obj
def setExpressCheckout(self, params):
"""
Initiates an Express Checkout transaction.
Optionally, the SetExpressCheckout API operation can set up billing agreements for
reference transactions and recurring payments.
Returns a NVP instance - check for token and payerid to continue!
"""
if "amt" in params:
import warnings
warnings.warn("'amt' has been deprecated. 'paymentrequest_0_amt' "
"should be used instead.", DeprecationWarning)
# Make a copy so we don't change things unexpectedly
params = params.copy()
params.update({'paymentrequest_0_amt': params['amt']})
del params['amt']
if self._is_recurring(params):
params = self._recurring_setExpressCheckout_adapter(params)
defaults = {"method": "SetExpressCheckout", "noshipping": 1}
required = ["returnurl", "cancelurl", "paymentrequest_0_amt"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def doExpressCheckoutPayment(self, params):
"""
        Completes payment for an Express Checkout transaction (DoExpressCheckoutPayment).
"""
if "amt" in params:
import warnings
warnings.warn("'amt' has been deprecated. 'paymentrequest_0_amt' "
"should be used instead.", DeprecationWarning)
# Make a copy so we don't change things unexpectedly
params = params.copy()
params.update({'paymentrequest_0_amt': params['amt']})
del params['amt']
defaults = {"method": "DoExpressCheckoutPayment", "paymentaction": "Sale"}
required = ["paymentrequest_0_amt", "token", "payerid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
payment_was_successful.send(sender=nvp_obj, **params)
return nvp_obj
def createRecurringPaymentsProfile(self, params, direct=False):
"""
Set direct to True to indicate that this is being called as a directPayment.
        Returns True if PayPal successfully creates the profile, otherwise False.
"""
defaults = {"method": "CreateRecurringPaymentsProfile"}
required = ["profilestartdate", "billingperiod", "billingfrequency", "amt"]
# Direct payments require CC data
if direct:
            required += ["creditcardtype", "acct", "expdate", "firstname", "lastname"]
        else:
            required += ["token", "payerid"]
nvp_obj = self._fetch(params, required, defaults)
# Flag if profile_type != ActiveProfile
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
payment_profile_created.send(sender=nvp_obj, **params)
return nvp_obj
def getExpressCheckoutDetails(self, params):
defaults = {"method": "GetExpressCheckoutDetails"}
required = ["token"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def setCustomerBillingAgreement(self, params):
raise DeprecationWarning
def createBillingAgreement(self, params):
"""
Create a billing agreement for future use, without any initial payment
"""
defaults = {"method": "CreateBillingAgreement"}
required = ["token"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def getTransactionDetails(self, params):
defaults = {"method": "GetTransactionDetails"}
required = ["transactionid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def massPay(self, params):
raise NotImplementedError
def getRecurringPaymentsProfileDetails(self, params):
raise NotImplementedError
def updateRecurringPaymentsProfile(self, params):
defaults = {"method": "UpdateRecurringPaymentsProfile"}
required = ["profileid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def billOutstandingAmount(self, params):
raise NotImplementedError
def manangeRecurringPaymentsProfileStatus(self, params, fail_silently=False):
"""
Requires `profileid` and `action` params.
Action must be either "Cancel", "Suspend", or "Reactivate".
"""
defaults = {"method": "ManageRecurringPaymentsProfileStatus"}
required = ["profileid", "action"]
nvp_obj = self._fetch(params, required, defaults)
        # TODO: This fail silently check should be using the error code, but it's not easy to access
if not nvp_obj.flag or (
fail_silently and nvp_obj.flag_info == 'Invalid profile status for cancel action; profile should be active or suspended'):
if params['action'] == 'Cancel':
recurring_cancel.send(sender=nvp_obj)
elif params['action'] == 'Suspend':
recurring_suspend.send(sender=nvp_obj)
elif params['action'] == 'Reactivate':
recurring_reactivate.send(sender=nvp_obj)
else:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def refundTransaction(self, params):
raise NotImplementedError
def doReferenceTransaction(self, params):
"""
Process a payment from a buyer's account, identified by a previous
transaction.
The `paymentaction` param defaults to "Sale", but may also contain the
values "Authorization" or "Order".
"""
defaults = {"method": "DoReferenceTransaction",
"paymentaction": "Sale"}
required = ["referenceid", "amt"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def _is_recurring(self, params):
"""Returns True if the item passed is a recurring transaction."""
return 'billingfrequency' in params
def _recurring_setExpressCheckout_adapter(self, params):
"""
The recurring payment interface to SEC is different than the recurring payment
interface to ECP. This adapts a normal call to look like a SEC call.
"""
params['l_billingtype0'] = "RecurringPayments"
params['l_billingagreementdescription0'] = params['desc']
REMOVE = ["billingfrequency", "billingperiod", "profilestartdate", "desc"]
        for k in list(params.keys()):
if k in REMOVE:
del params[k]
return params
def _fetch(self, params, required, defaults):
"""Make the NVP request and store the response."""
defaults.update(params)
pp_params = self._check_and_update_params(required, defaults)
pp_string = self.signature + urlencode(pp_params)
response = self._request(pp_string)
response_params = self._parse_response(response)
if getattr(settings, 'PAYPAL_DEBUG', settings.DEBUG):
log.debug('PayPal Request:\n%s\n', pprint.pformat(defaults))
log.debug('PayPal Response:\n%s\n', pprint.pformat(response_params))
# Gather all NVP parameters to pass to a new instance.
nvp_params = {}
tmpd = defaults.copy()
tmpd.update(response_params)
for k, v in tmpd.items():
if k in self.NVP_FIELDS:
nvp_params[str(k)] = v
# PayPal timestamp has to be formatted.
if 'timestamp' in nvp_params:
nvp_params['timestamp'] = paypaltime2datetime(nvp_params['timestamp'])
nvp_obj = PayPalNVP(**nvp_params)
nvp_obj.init(self.request, params, response_params)
nvp_obj.save()
return nvp_obj
def _request(self, data):
"""Moved out to make testing easier."""
return requests.post(self.endpoint, data=data.encode("ascii")).content
def _check_and_update_params(self, required, params):
"""
Ensure all required parameters were passed to the API call and format
them correctly.
"""
for r in required:
if r not in params:
raise PayPalError("Missing required param: %s" % r)
# Upper case all the parameters for PayPal.
return (dict((k.upper(), v) for k, v in params.items()))
def _parse_response(self, response):
"""Turn the PayPal response into a dict"""
q = QueryDict(response, encoding='UTF-8').dict()
return {k.lower(): v for k,v in q.items()}
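# Illustrative sketch (not part of the original module): the three-step Express
# Checkout flow with PayPalWPP, written as an uncalled helper so nothing runs on
# import. The amount, URLs and the incoming request object are hypothetical.
def _example_express_checkout_flow(request, token=None, payerid=None):
    wpp = PayPalWPP(request)
    if token is None:
        # Step 1: obtain a token and build the redirect URL for the buyer.
        nvp = wpp.setExpressCheckout({
            "paymentrequest_0_amt": "10.00",
            "returnurl": "https://example.com/paypal/return/",  # hypothetical
            "cancelurl": "https://example.com/paypal/cancel/",  # hypothetical
        })
        # Per setExpressCheckout's docstring, check the returned NVP for a token.
        return express_endpoint_for_token(getattr(nvp, "token", ""), commit=True)
    # Step 2: after the buyer returns, confirm the checkout details.
    wpp.getExpressCheckoutDetails({"token": token})
    # Step 3: capture the payment with the token and payer id PayPal passed back.
    return wpp.doExpressCheckoutPayment({
        "paymentrequest_0_amt": "10.00",
        "token": token,
        "payerid": payerid,
    })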
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform fitting of structures.
"""
from __future__ import division, unicode_literals
import six
from six.moves import filter
from six.moves import zip
import numpy as np
import itertools
import abc
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.optimization.linear_assignment import LinearAssignment
from pymatgen.util.coord_cython import pbc_shortest_vectors, is_coord_subset_pbc
from pymatgen.util.coord import lattice_points_in_supercell
__author__ = "William Davidson Richards, Stephen Dacek, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "William Davidson Richards"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Dec 3, 2012"
class AbstractComparator(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract Comparator class. A Comparator defines how sites are compared in
a structure.
"""
@abc.abstractmethod
def are_equal(self, sp1, sp2):
"""
Defines how the species of two sites are considered equal. For
example, one can consider sites to have the same species only when
the species are exactly the same, i.e., Fe2+ matches Fe2+ but not
Fe3+. Or one can define that only the element matters,
and all oxidation state information are ignored.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are considered equal.
"""
return
@abc.abstractmethod
def get_hash(self, composition):
"""
Defines a hash to group structures. This allows structures to be
grouped efficiently for comparison. The hash must be invariant under
supercell creation. (e.g. composition is not a good hash, but
        fractional_composition might be). Reduced formula is not a good hash,
due to weird behavior with fractional occupancy.
Composition is used here instead of structure because for anonymous
matches it is much quicker to apply a substitution to a composition
object than a structure object.
Args:
composition (Composition): composition of the structure
Returns:
A hashable object. Examples can be string formulas, integers etc.
"""
return
@classmethod
def from_dict(cls, d):
for trans_modules in ['structure_matcher']:
mod = __import__('pymatgen.analysis.' + trans_modules,
globals(), locals(), [d['@class']], 0)
if hasattr(mod, d['@class']):
trans = getattr(mod, d['@class'])
return trans()
raise ValueError("Invalid Comparator dict")
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class SpeciesComparator(AbstractComparator):
"""
A Comparator that matches species exactly. The default used in
StructureMatcher.
"""
def are_equal(self, sp1, sp2):
"""
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not Fe3+.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are equal.
"""
return sp1 == sp2
def get_hash(self, composition):
"""
Returns: Fractional composition
"""
return composition.fractional_composition
class SpinComparator(AbstractComparator):
"""
A Comparator that matches magnetic structures to their inverse spins.
This comparator is primarily used to filter magnetically ordered
structures with opposite spins, which are equivalent.
"""
def are_equal(self, sp1, sp2):
"""
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not
Fe3+. and the spins are reversed. i.e., spin up maps to spin down,
and vice versa.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are equal.
"""
for s1 in sp1.keys():
spin1 = getattr(s1, "spin", 0)
oxi1 = getattr(s1, "oxi_state", 0)
for s2 in sp2.keys():
spin2 = getattr(s2, "spin", 0)
oxi2 = getattr(s2, "oxi_state", 0)
if (s1.symbol == s2.symbol and oxi1 == oxi2 and
spin2 == -spin1):
break
else:
return False
return True
def get_hash(self, composition):
"""
Returns: Fractional composition
"""
return composition.fractional_composition
class ElementComparator(AbstractComparator):
"""
A Comparator that matches elements. i.e. oxidation states are
ignored.
"""
def are_equal(self, sp1, sp2):
"""
True if element:amounts are exactly the same, i.e.,
oxidation state is not considered.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are the same based on element
and amounts.
"""
comp1 = Composition(sp1)
comp2 = Composition(sp2)
return comp1.get_el_amt_dict() == comp2.get_el_amt_dict()
def get_hash(self, composition):
"""
Returns: Fractional element composition
"""
return composition.element_composition.fractional_composition
class FrameworkComparator(AbstractComparator):
"""
A Comparator that matches sites, regardless of species.
"""
def are_equal(self, sp1, sp2):
"""
True if there are atoms on both sites.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
True always
"""
return True
def get_hash(self, composition):
"""
No hash possible
"""
return 1
class OrderDisorderElementComparator(AbstractComparator):
"""
A Comparator that matches sites, given some overlap in the element
composition
"""
def are_equal(self, sp1, sp2):
"""
True if there is some overlap in composition between the species
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
            True if there is any overlap in the element compositions
"""
set1 = set(sp1.element_composition.keys())
set2 = set(sp2.element_composition.keys())
if set1.intersection(set2):
return True
return False
    def get_hash(self, composition):
        """
No hash possible
"""
return 1
class OccupancyComparator(AbstractComparator):
"""
A Comparator that matches occupancies on sites,
irrespective of the species of those sites.
"""
def are_equal(self, sp1, sp2):
"""
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
True if sets of occupancies (amt) are equal on both sites.
"""
set1 = set(sp1.element_composition.values())
set2 = set(sp2.element_composition.values())
if set1 == set2:
return True
else:
return False
def get_hash(self, composition):
# Difficult to define sensible hash
return 1
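# Illustrative sketch (not part of the original module): how the two most common
# comparators treat oxidation states. Fe2+ and Fe3+ are distinct species but the
# same element, so the two comparators disagree on this pair.
def _example_comparator_behaviour():
    fe2 = {get_el_sp("Fe2+"): 1}
    fe3 = {get_el_sp("Fe3+"): 1}
    strict = SpeciesComparator().are_equal(fe2, fe3)  # False: species differ
    loose = ElementComparator().are_equal(fe2, fe3)   # True: both are 1 x Fe
    return strict, loose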
class StructureMatcher(MSONable):
"""
Class to match structures by similarity.
Algorithm:
1. Given two structures: s1 and s2
2. Optional: Reduce to primitive cells.
3. If the number of sites do not match, return False
4. Reduce to s1 and s2 to Niggli Cells
5. Optional: Scale s1 and s2 to same volume.
6. Optional: Remove oxidation states associated with sites
7. Find all possible lattice vectors for s2 within shell of ltol.
8. For s1, translate an atom in the smallest set to the origin
9. For s2: find all valid lattices from permutations of the list
of lattice vectors (invalid if: det(Lattice Matrix) < half
volume of original s2 lattice)
10. For each valid lattice:
        a. If the lattice angles are within tolerance of s1,
basis change s2 into new lattice.
b. For each atom in the smallest set of s2:
i. Translate to origin and compare fractional sites in
structure within a fractional tolerance.
ii. If true:
ia. Convert both lattices to cartesian and place
both structures on an average lattice
ib. Compute and return the average and max rms
displacement between the two structures normalized
by the average free length per atom
if fit function called:
if normalized max rms displacement is less than
stol. Return True
if get_rms_dist function called:
if normalized average rms displacement is less
than the stored rms displacement, store and
continue. (This function will search all possible
lattices for the smallest average rms displacement
between the two structures)
Args:
ltol (float): Fractional length tolerance. Default is 0.2.
stol (float): Site tolerance. Defined as the fraction of the
average free length per atom := ( V / Nsites ) ** (1/3)
Default is 0.3.
angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
primitive_cell (bool): If true: input structures will be reduced to
primitive cells prior to matching. Default to True.
scale (bool): Input structures are scaled to equivalent volume if
true; For exact matching, set to False.
attempt_supercell (bool): If set to True and number of sites in
cells differ after a primitive cell reduction (divisible by an
integer) attempts to generate a supercell transformation of the
smaller cell which is equivalent to the larger structure.
allow_subset (bool): Allow one structure to match to the subset of
another structure. Eg. Matching of an ordered structure onto a
disordered one, or matching a delithiated to a lithiated
structure. This option cannot be combined with
attempt_supercell, or with structure grouping.
comparator (Comparator): A comparator object implementing an equals
            method that declares equivalency of sites. Default is
SpeciesComparator, which implies rigid species
mapping, i.e., Fe2+ only matches Fe2+ and not Fe3+.
Other comparators are provided, e.g., ElementComparator which
matches only the elements and not the species.
The reason why a comparator object is used instead of
supplying a comparison function is that it is not possible to
pickle a function, which makes it otherwise difficult to use
StructureMatcher with Python's multiprocessing.
supercell_size (str): Method to use for determining the size of a
supercell (if applicable). Possible values are num_sites,
num_atoms, volume, or an element present in both structures.
ignored_species (list): A list of ions to be ignored in matching. Useful
for matching structures that have similar frameworks except for
certain ions, e.g., Li-ion intercalation frameworks. This is more
useful than allow_subset because it allows better control over
what species are ignored in the matching.
"""
def __init__(self, ltol=0.2, stol=0.3, angle_tol=5, primitive_cell=True,
scale=True, attempt_supercell=False, allow_subset=False,
comparator=SpeciesComparator(), supercell_size='num_sites',
ignored_species=None):
self.ltol = ltol
self.stol = stol
self.angle_tol = angle_tol
self._comparator = comparator
self._primitive_cell = primitive_cell
self._scale = scale
self._supercell = attempt_supercell
self._supercell_size = supercell_size
self._subset = allow_subset
self._ignored_species = [] if ignored_species is None else \
ignored_species[:]
def _get_supercell_size(self, s1, s2):
"""
Returns the supercell size, and whether the supercell should
be applied to s1. If fu == 1, s1_supercell is returned as
true, to avoid ambiguity.
"""
if self._supercell_size == 'num_sites':
fu = s2.num_sites / s1.num_sites
elif self._supercell_size == 'num_atoms':
fu = s2.composition.num_atoms / s1.composition.num_atoms
elif self._supercell_size == 'volume':
fu = s2.volume / s1.volume
else:
try:
el = get_el_sp(self._supercell_size)
fu = s2.composition[el] / s1.composition[el]
except:
raise ValueError('invalid argument for supercell_size')
if fu < 2/3:
return int(round(1/fu)), False
else:
return int(round(fu)), True
def _get_lattices(self, target_lattice, s, supercell_size=1):
"""
        Yields lattices for s with lengths and angles close to
        target_lattice. If supercell_size is specified, the
        returned lattice will have that number of primitive cells
        in it
        Args:
            s (Structure): structure from which lattices are generated
            target_lattice (Lattice): lattice to match
"""
lattices = s.lattice.find_all_mappings(
target_lattice, ltol=self.ltol, atol=self.angle_tol,
skip_rotation_matrix=True)
for l, _, scale_m in lattices:
if abs(abs(np.linalg.det(scale_m)) - supercell_size) < 0.5:
yield l, scale_m
def _get_supercells(self, struct1, struct2, fu, s1_supercell):
"""
Computes all supercells of one structure close to the lattice of the
other
        if s1_supercell == True, it makes the supercells of struct1, otherwise
        it makes them of struct2
        yields: s1, s2, average_lattice, supercell_matrix
"""
def av_lat(l1, l2):
params = (np.array(l1.lengths_and_angles) +
np.array(l2.lengths_and_angles)) / 2
return Lattice.from_lengths_and_angles(*params)
def sc_generator(s1, s2):
s2_fc = np.array(s2.frac_coords)
if fu == 1:
cc = np.array(s1.cart_coords)
for l, sc_m in self._get_lattices(s2.lattice, s1, fu):
fc = l.get_fractional_coords(cc)
fc -= np.floor(fc)
yield fc, s2_fc, av_lat(l, s2.lattice), sc_m
else:
fc_init = np.array(s1.frac_coords)
for l, sc_m in self._get_lattices(s2.lattice, s1, fu):
fc = np.dot(fc_init, np.linalg.inv(sc_m))
lp = lattice_points_in_supercell(sc_m)
fc = (fc[:, None, :] + lp[None, :, :]).reshape((-1, 3))
fc -= np.floor(fc)
yield fc, s2_fc, av_lat(l, s2.lattice), sc_m
if s1_supercell:
for x in sc_generator(struct1, struct2):
yield x
else:
for x in sc_generator(struct2, struct1):
# reorder generator output so s1 is still first
yield x[1], x[0], x[2], x[3]
def _cmp_fstruct(self, s1, s2, frac_tol, mask):
"""
        Returns true if a matching exists between s1 and s2
under frac_tol. s2 should be a subset of s1
"""
if len(s2) > len(s1):
raise ValueError("s1 must be larger than s2")
if mask.shape != (len(s2), len(s1)):
raise ValueError("mask has incorrect shape")
return is_coord_subset_pbc(s2, s1, frac_tol, mask)
def _cart_dists(self, s1, s2, avg_lattice, mask, normalization, lll_frac_tol=None):
"""
Finds a matching in cartesian space. Finds an additional
fractional translation vector to minimize RMS distance
Args:
s1, s2: numpy arrays of fractional coordinates. len(s1) >= len(s2)
avg_lattice: Lattice on which to calculate distances
mask: numpy array of booleans. mask[i, j] = True indicates
that s2[i] cannot be matched to s1[j]
normalization (float): inverse normalization length
Returns:
Distances from s2 to s1, normalized by (V/Natom) ^ 1/3
Fractional translation vector to apply to s2.
Mapping from s1 to s2, i.e. with numpy slicing, s1[mapping] => s2
"""
if len(s2) > len(s1):
raise ValueError("s1 must be larger than s2")
if mask.shape != (len(s2), len(s1)):
raise ValueError("mask has incorrect shape")
# vectors are from s2 to s1
vecs, d_2 = pbc_shortest_vectors(avg_lattice, s2, s1, mask,
return_d2=True,
lll_frac_tol=lll_frac_tol)
lin = LinearAssignment(d_2)
s = lin.solution
short_vecs = vecs[np.arange(len(s)), s]
translation = np.average(short_vecs, axis=0)
f_translation = avg_lattice.get_fractional_coords(translation)
new_d2 = np.sum((short_vecs - translation) ** 2, axis=-1)
return new_d2 ** 0.5 * normalization, f_translation, s
def _get_mask(self, struct1, struct2, fu, s1_supercell):
"""
Returns mask for matching struct2 to struct1. If struct1 has sites
a b c, and fu = 2, assumes supercells of struct2 will be ordered
aabbcc (rather than abcabc)
Returns:
mask, struct1 translation indices, struct2 translation index
"""
mask = np.zeros((len(struct2), len(struct1), fu), dtype=np.bool)
inner = []
for sp2, i in itertools.groupby(enumerate(struct2.species_and_occu),
key=lambda x: x[1]):
i = list(i)
inner.append((sp2, slice(i[0][0], i[-1][0]+1)))
for sp1, j in itertools.groupby(enumerate(struct1.species_and_occu),
key=lambda x: x[1]):
j = list(j)
j = slice(j[0][0], j[-1][0]+1)
for sp2, i in inner:
mask[i, j, :] = not self._comparator.are_equal(sp1, sp2)
if s1_supercell:
mask = mask.reshape((len(struct2), -1))
else:
# supercell is of struct2, roll fu axis back to preserve
# correct ordering
mask = np.rollaxis(mask, 2, 1)
mask = mask.reshape((-1, len(struct1)))
# find the best translation indices
i = np.argmax(np.sum(mask, axis=-1))
inds = np.where(np.invert(mask[i]))[0]
if s1_supercell:
# remove the symmetrically equivalent s1 indices
inds = inds[::fu]
return np.array(mask, dtype=np.int_), inds, i
def fit(self, struct1, struct2):
"""
Fit two structures.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
True or False.
"""
struct1, struct2 = self._process_species([struct1, struct2])
if not self._subset and self._comparator.get_hash(struct1.composition) \
!= self._comparator.get_hash(struct2.composition):
return None
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
match = self._match(struct1, struct2, fu, s1_supercell,
break_on_match=True)
if match is None:
return False
else:
return match[0] <= self.stol
def get_rms_dist(self, struct1, struct2):
"""
Calculate RMS displacement between two structures
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
rms displacement normalized by (Vol / nsites) ** (1/3)
and maximum distance between paired sites. If no matching
lattice is found None is returned.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
match = self._match(struct1, struct2, fu, s1_supercell, use_rms=True,
break_on_match=False)
if match is None:
return None
else:
return match[0], max(match[1])
def _process_species(self, structures):
copied_structures = []
for s in structures:
# We need the copies to be actual Structure to work properly, not
# subclasses. So do type(s) == Structure.
ss = s.copy() if type(s) == Structure else \
Structure.from_sites(s)
if self._ignored_species:
ss.remove_species(self._ignored_species)
copied_structures.append(ss)
return copied_structures
def _preprocess(self, struct1, struct2, niggli=True):
"""
Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2
"""
struct1 = struct1.copy()
struct2 = struct2.copy()
if niggli:
struct1 = struct1.get_reduced_structure(reduction_algo="niggli")
struct2 = struct2.get_reduced_structure(reduction_algo="niggli")
# primitive cell transformation
if self._primitive_cell:
struct1 = struct1.get_primitive_structure()
struct2 = struct2.get_primitive_structure()
if self._supercell:
fu, s1_supercell = self._get_supercell_size(struct1, struct2)
else:
fu, s1_supercell = 1, True
mult = fu if s1_supercell else 1/fu
# rescale lattice to same volume
if self._scale:
ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6)
nl1 = Lattice(struct1.lattice.matrix * ratio)
struct1.modify_lattice(nl1)
nl2 = Lattice(struct2.lattice.matrix / ratio)
struct2.modify_lattice(nl2)
return struct1, struct2, fu, s1_supercell
def _match(self, struct1, struct2, fu, s1_supercell=True, use_rms=False,
break_on_match=False):
"""
Matches one struct onto the other
"""
ratio = fu if s1_supercell else 1/fu
if len(struct1) * ratio >= len(struct2):
return self._strict_match(
struct1, struct2, fu, s1_supercell=s1_supercell,
break_on_match=break_on_match, use_rms=use_rms)
else:
return self._strict_match(
struct2, struct1, fu, s1_supercell=(not s1_supercell),
break_on_match=break_on_match, use_rms=use_rms)
def _strict_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False):
"""
Matches struct2 onto struct1 (which should contain all sites in
struct2).
Args:
struct1, struct2 (Structure): structures to be matched
fu (int): size of supercell to create
s1_supercell (bool): whether to create the supercell of
struct1 (vs struct2)
use_rms (bool): whether to minimize the rms of the matching
break_on_match (bool): whether to stop search at first
valid match
"""
if fu < 1:
raise ValueError("fu cannot be less than 1")
mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2,
fu, s1_supercell)
if mask.shape[0] > mask.shape[1]:
raise ValueError('after supercell creation, struct1 must '
'have more sites than struct2')
# check that a valid mapping exists
if (not self._subset) and mask.shape[1] != mask.shape[0]:
return None
if LinearAssignment(mask).min_cost > 0:
return None
best_match = None
# loop over all lattices
for s1fc, s2fc, avg_l, sc_m in \
self._get_supercells(struct1, struct2, fu, s1_supercell):
# compute fractional tolerance
normalization = (len(s1fc) / avg_l.volume) ** (1/3)
inv_abc = np.array(avg_l.reciprocal_lattice.abc)
frac_tol = inv_abc * self.stol / (np.pi * normalization)
# loop over all translations
for s1i in s1_t_inds:
t = s1fc[s1i] - s2fc[s2_t_ind]
t_s2fc = s2fc + t
if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):
inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)
lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)
dist, t_adj, mapping = self._cart_dists(
s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)
if use_rms:
val = np.linalg.norm(dist) / len(dist) ** 0.5
else:
val = max(dist)
if best_match is None or val < best_match[0]:
total_t = t + t_adj
total_t -= np.round(total_t)
best_match = val, dist, sc_m, total_t, mapping
if (break_on_match or val < 1e-5) and val < self.stol:
return best_match
if best_match and best_match[0] < self.stol:
return best_match
def group_structures(self, s_list, anonymous=False):
"""
Given a list of structures, use fit to group
them by structural equality.
Args:
s_list ([Structure]): List of structures to be grouped
            anonymous (bool): Whether to use anonymous mode.
Returns:
A list of lists of matched structures
        Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put
in different groups without comparison.
"""
if self._subset:
raise ValueError("allow_subset cannot be used with"
" group_structures")
original_s_list = list(s_list)
s_list = self._process_species(s_list)
# Use structure hash to pre-group structures
if anonymous:
c_hash = lambda c: c.anonymized_formula
else:
c_hash = self._comparator.get_hash
s_hash = lambda s: c_hash(s[1].composition)
sorted_s_list = sorted(enumerate(s_list), key=s_hash)
all_groups = []
# For each pre-grouped list of structures, perform actual matching.
for k, g in itertools.groupby(sorted_s_list, key=s_hash):
unmatched = list(g)
while len(unmatched) > 0:
i, refs = unmatched.pop(0)
matches = [i]
if anonymous:
inds = filter(lambda i: self.fit_anonymous(refs,
unmatched[i][1]), list(range(len(unmatched))))
else:
inds = filter(lambda i: self.fit(refs, unmatched[i][1]),
list(range(len(unmatched))))
inds = list(inds)
matches.extend([unmatched[i][0] for i in inds])
unmatched = [unmatched[i] for i in range(len(unmatched))
if i not in inds]
all_groups.append([original_s_list[i] for i in matches])
return all_groups
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"comparator": self._comparator.as_dict(),
"stol": self.stol,
"ltol": self.ltol,
"angle_tol": self.angle_tol,
"primitive_cell": self._primitive_cell,
"scale": self._scale}
@classmethod
def from_dict(cls, d):
return StructureMatcher(
ltol=d["ltol"], stol=d["stol"], angle_tol=d["angle_tol"],
primitive_cell=d["primitive_cell"], scale=d["scale"],
comparator=AbstractComparator.from_dict(d["comparator"]))
def _anonymous_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False, single_match=False):
"""
Tries all permutations of matching struct1 to struct2.
Args:
struct1, struct2 (Structure): Preprocessed input structures
Returns:
List of (mapping, match)
"""
if not isinstance(self._comparator, SpeciesComparator):
raise ValueError('Anonymous fitting currently requires SpeciesComparator')
# check that species lists are comparable
sp1 = struct1.composition.elements
sp2 = struct2.composition.elements
if len(sp1) != len(sp2):
return None
ratio = fu if s1_supercell else 1/fu
swapped = len(struct1) * ratio < len(struct2)
s1_comp = struct1.composition
s2_comp = struct2.composition
matches = []
for perm in itertools.permutations(sp2):
sp_mapping = dict(zip(sp1, perm))
# do quick check that compositions are compatible
mapped_comp = Composition({sp_mapping[k]: v
for k, v in s1_comp.items()})
if (not self._subset) and (
self._comparator.get_hash(mapped_comp) !=
self._comparator.get_hash(s2_comp)):
continue
mapped_struct = struct1.copy()
mapped_struct.replace_species(sp_mapping)
if swapped:
m = self._strict_match(struct2, mapped_struct, fu,
(not s1_supercell), use_rms,
break_on_match)
else:
m = self._strict_match(mapped_struct, struct2, fu, s1_supercell,
use_rms, break_on_match)
if m:
matches.append((sp_mapping, m))
if single_match:
break
return matches
def get_rms_anonymous(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
(min_rms, min_mapping)
min_rms is the minimum rms distance, and min_mapping is the
corresponding minimal species mapping that would map
            struct1 to struct2. (None, None) is returned if the min_rms
exceeds the threshold.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
use_rms=True, break_on_match=False)
if matches:
best = sorted(matches, key=lambda x: x[1][0])[0]
return best[1][0], best[0]
else:
return None, None
def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
        electronegativity between the matched species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
use_rms=True, break_on_match=True)
if matches:
min_X_diff = np.inf
for m in matches:
X_diff = 0
for k, v in m[0].items():
X_diff += struct1.composition[k] * (k.X - v.X) ** 2
if X_diff < min_X_diff:
min_X_diff = X_diff
best = m[0]
return best
def get_all_anonymous_mappings(self, struct1, struct2, niggli=True,
include_dist=False):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. Returns a dictionary of species
substitutions that are within tolerance
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
niggli (bool): Find niggli cell in preprocessing
            include_dist (bool): Return the maximum distance with each mapping
Returns:
list of species mappings that map struct1 to struct2.
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
niggli)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
break_on_match=not include_dist)
if matches:
if include_dist:
return [(m[0], m[1][0]) for m in matches]
else:
return [m[0] for m in matches]
def fit_anonymous(self, struct1, struct2, niggli=True):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
            True/False: Whether a species mapping can map struct1 to struct2
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
niggli)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
break_on_match=True, single_match=True)
if matches:
return True
else:
return False
def get_supercell_matrix(self, supercell, struct):
"""
Returns the matrix for transforming struct to supercell. This
can be used for very distorted 'supercells' where the primitive cell
is impossible to find
"""
if self._primitive_cell:
raise ValueError("get_supercell_matrix cannot be used with the "
"primitive cell option")
struct, supercell, fu, s1_supercell = self._preprocess(struct,
supercell, False)
if not s1_supercell:
raise ValueError("The non-supercell must be put onto the basis"
" of the supercell, not the other way around")
match = self._match(struct, supercell, fu, s1_supercell, use_rms=True,
break_on_match=False)
if match is None:
return None
return match[2]
def get_transformation(self, struct1, struct2):
"""
Returns the supercell transformation, fractional translation vector,
and a mapping to transform struct2 to be similar to struct1.
Args:
struct1 (Structure): Reference structure
struct2 (Structure): Structure to transform.
Returns:
supercell (numpy.ndarray(3, 3)): supercell matrix
vector (numpy.ndarray(3)): fractional translation vector
mapping (list(int or None)):
The first len(struct1) items of the mapping vector are the
indices of struct1's corresponding sites in struct2 (or None
if there is no corresponding site), and the other items are
the remaining site indices of struct2.
"""
if self._primitive_cell:
raise ValueError("get_transformation cannot be used with the "
"primitive cell option")
struct1, struct2 = self._process_species((struct1, struct2))
s1, s2, fu, s1_supercell = self._preprocess(struct1, struct2, False)
ratio = fu if s1_supercell else 1/fu
if s1_supercell and fu > 1:
raise ValueError("Struct1 must be the supercell, "
"not the other way around")
if len(s1) * ratio >= len(s2):
# s1 is superset
match = self._strict_match(s1, s2, fu=fu, s1_supercell=False,
use_rms=True, break_on_match=False)
if match is None:
return None
# invert the mapping, since it needs to be from s1 to s2
mapping = [list(match[4]).index(i) if i in match[4] else None
for i in range(len(s1))]
return match[2], match[3], mapping
else:
# s2 is superset
match = self._strict_match(s2, s1, fu=fu, s1_supercell=True,
use_rms=True, break_on_match=False)
if match is None:
return None
# add sites not included in the mapping
not_included = list(range(len(s2) * fu))
for i in match[4]:
not_included.remove(i)
mapping = list(match[4]) + not_included
return match[2], -match[3], mapping
def get_s2_like_s1(self, struct1, struct2, include_ignored_species=True):
"""
Performs transformations on struct2 to put it in a basis similar to
struct1 (without changing any of the inter-site distances)
Args:
struct1 (Structure): Reference structure
struct2 (Structure): Structure to transform.
include_ignored_species (bool): Defaults to True,
the ignored_species is also transformed to the struct1
lattice orientation, though obviously there is no direct
matching to existing sites.
Returns:
A structure object similar to struct1, obtained by making a
supercell, sorting, and translating struct2.
"""
s1, s2 = self._process_species([struct1, struct2])
trans = self.get_transformation(s1, s2)
if trans is None:
return None
sc, t, mapping = trans
sites = [site for site in s2]
# Append the ignored sites at the end.
sites.extend([site for site in struct2 if site not in s2])
temp = Structure.from_sites(sites)
temp.make_supercell(sc)
temp.translate_sites(list(range(len(temp))), t)
# translate sites to correct unit cell
for i, j in enumerate(mapping[:len(s1)]):
if j is not None:
vec = np.round(struct1[i].frac_coords - temp[j].frac_coords)
temp.translate_sites(j, vec, to_unit_cell=False)
sites = [temp.sites[i] for i in mapping if i is not None]
if include_ignored_species:
start = int(round(len(temp) / len(struct2) * len(s2)))
sites.extend(temp.sites[start:])
return Structure.from_sites(sites)
def get_mapping(self, superset, subset):
"""
Calculate the mapping from superset to subset.
Args:
superset (Structure): Structure containing at least the sites in
subset (within the structure matching tolerance)
subset (Structure): Structure containing some of the sites in
superset (within the structure matching tolerance)
Returns:
numpy array such that superset.sites[mapping] is within matching
tolerance of subset.sites or None if no such mapping is possible
"""
if self._supercell:
raise ValueError("cannot compute mapping to supercell")
if self._primitive_cell:
raise ValueError("cannot compute mapping with primitive cell "
"option")
if len(subset) > len(superset):
raise ValueError("subset is larger than superset")
superset, subset, _, _ = self._preprocess(superset, subset, True)
match = self._strict_match(superset, subset, 1, break_on_match=False)
if match is None or match[0] > self.stol:
return None
return match[4]
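# Illustrative sketch (not part of the original module): fitting two descriptions
# of the same rock-salt-like cell. The lattice constant and species below are
# made up purely for demonstration.
def _example_structure_matcher_fit():
    lattice = Lattice.cubic(4.0)
    s1 = Structure(lattice, ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    # The same arrangement with the origin shifted by (1/2, 1/2, 1/2).
    s2 = Structure(lattice, ["Na", "Cl"], [[0.5, 0.5, 0.5], [0, 0, 0]])
    matcher = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
    # fit() follows the algorithm documented on the class: reduce the cells,
    # enumerate candidate lattices, then compare normalized rms displacements.
    return matcher.fit(s1, s2), matcher.get_rms_dist(s1, s2)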
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
    # Importing just the symbol here because io.UnsupportedOperation
    # does not exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from murano.openstack.common import eventlet_backdoor
from murano.openstack.common.gettextutils import _LE, _LI, _LW
from murano.openstack.common import importutils
from murano.openstack.common import log as logging
from murano.openstack.common import systemd
from murano.openstack.common import threadgroup
rpc = importutils.try_import('murano.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_LE('Exception during rpc cleanup.'))
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This allows workers to start
            # up quickly but ensures we don't keep re-forking children
            # that die immediately.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
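# Editor-added usage sketch (hedged; not part of the original module and never
# called). The service subclass below is illustrative only: launch() picks
# ServiceLauncher for a single worker and ProcessLauncher (pre-forked children)
# otherwise, and callers then block in launcher.wait().
def _example_launch_usage():
    class _NoopService(Service):
        def start(self):
            # A real service would add threads to self.tg here.
            pass
    launcher = launch(_NoopService(), workers=2)
    # launcher.wait() would block until the workers exit or a signal arrives.
    return launcher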
|
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thrift PD interface basic tests
"""
import time
import sys
import logging
import unittest
import random
import oftest.dataplane as dataplane
import oftest.pd_base_tests as pd_base_tests
from oftest.testutils import *
import os
from utils import *
from p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
from mc_pd_rpc.ttypes import *
this_dir = os.path.dirname(os.path.abspath(__file__))
#global defaults
inner_rmac_group = 1
outer_rmac_group = 2
rewrite_index = 1
vrf = 1
rmac = '00:33:33:33:33:33'
#Enable features based on p4src/p4feature.h
tunnel_enabled = 1
ipv6_enabled = 1
acl_enabled = 1
learn_timeout = 6
def populate_default_entries(client, sess_hdl, dev_tgt):
client.validate_outer_ethernet_set_default_action_set_valid_outer_unicast_packet(
sess_hdl, dev_tgt)
client.validate_outer_ipv4_packet_set_default_action_set_valid_outer_ipv4_packet(
sess_hdl, dev_tgt)
client.validate_outer_ipv6_packet_set_default_action_set_valid_outer_ipv6_packet(
sess_hdl, dev_tgt)
client.smac_set_default_action_smac_miss(
sess_hdl, dev_tgt)
client.dmac_set_default_action_dmac_miss(
sess_hdl, dev_tgt)
client.learn_notify_set_default_action_nop(
sess_hdl, dev_tgt)
client.rmac_set_default_action_on_miss(
sess_hdl, dev_tgt)
client.ipv4_fib_set_default_action_on_miss(
sess_hdl, dev_tgt)
client.fwd_result_set_default_action_nop(
sess_hdl, dev_tgt)
client.nexthop_set_default_action_nop(
sess_hdl, dev_tgt)
client.egress_block_set_default_action_set_egress_drop(
sess_hdl, dev_tgt)
client.rid_set_default_action_nop(
sess_hdl, dev_tgt)
client.rewrite_set_default_action_nop(
sess_hdl, dev_tgt)
client.egress_vlan_xlate_set_default_action_nop(
sess_hdl, dev_tgt)
if acl_enabled:
client.ip_acl_set_default_action_nop(
sess_hdl, dev_tgt)
client.ipv4_racl_set_default_action_nop(
sess_hdl, dev_tgt)
client.validate_packet_set_default_action_nop(
sess_hdl, dev_tgt)
if tunnel_enabled:
client.outer_rmac_set_default_action_nop(
sess_hdl, dev_tgt)
client.ipv4_src_vtep_set_default_action_nop(
sess_hdl, dev_tgt)
client.ipv4_dest_vtep_set_default_action_nop(
sess_hdl, dev_tgt)
client.egress_bd_map_set_default_action_nop(
sess_hdl, dev_tgt)
if ipv6_enabled and tunnel_enabled:
client.ipv6_src_vtep_set_default_action_nop(
sess_hdl, dev_tgt)
client.ipv6_dest_vtep_set_default_action_nop(
sess_hdl, dev_tgt)
if ipv6_enabled and acl_enabled:
client.ipv6_acl_set_default_action_nop(
sess_hdl, dev_tgt)
client.ipv6_racl_set_default_action_nop(
sess_hdl, dev_tgt)
def populate_init_entries(client, sess_hdl, dev_tgt):
match_spec = dc_example_mac_rewrite_match_spec_t(
l2_metadata_egress_smac_idx=rewrite_index,
ipv4_dstAddr=0,
ipv4_dstAddr_mask=0)
action_spec = dc_example_rewrite_unicast_mac_action_spec_t(
action_smac=macAddr_to_string(rmac))
client.mac_rewrite_table_add_with_rewrite_unicast_mac(
sess_hdl, dev_tgt,
match_spec, 1000, action_spec)
match_spec = dc_example_fwd_result_match_spec_t(
l2_metadata_l2_redirect=0,
l2_metadata_l2_redirect_mask=0,
acl_metadata_acl_redirect=0,
acl_metadata_acl_redirect_mask=0,
acl_metadata_racl_redirect=0,
acl_metadata_racl_redirect_mask=0,
l3_metadata_fib_hit=1,
l3_metadata_fib_hit_mask=1)
client.fwd_result_table_add_with_set_fib_redirect_action(
sess_hdl, dev_tgt,
match_spec, 1000)
match_spec = dc_example_fwd_result_match_spec_t(
l2_metadata_l2_redirect=1,
l2_metadata_l2_redirect_mask=1,
acl_metadata_acl_redirect=0,
acl_metadata_acl_redirect_mask=0,
acl_metadata_racl_redirect=0,
acl_metadata_racl_redirect_mask=0,
l3_metadata_fib_hit=0,
l3_metadata_fib_hit_mask=0)
client.fwd_result_table_add_with_set_l2_redirect_action(
sess_hdl, dev_tgt,
match_spec, 1000)
#Add default inner rmac entry
match_spec = dc_example_rmac_match_spec_t(
l3_metadata_rmac_group=inner_rmac_group,
l2_metadata_lkp_mac_da=macAddr_to_string(rmac))
client.rmac_table_add_with_set_rmac_hit_flag(
sess_hdl, dev_tgt,
match_spec)
match_spec = dc_example_egress_system_acl_match_spec_t(
l3_metadata_mtu_check_fail=0,
l3_metadata_mtu_check_fail_mask=0,
l2_metadata_prune=0,
l2_metadata_prune_mask=0x3FF)
client.egress_system_acl_table_add_with_egress_drop(
sess_hdl, dev_tgt,
match_spec, 1000)
if tunnel_enabled:
#Add default outer rmac entry
match_spec = dc_example_outer_rmac_match_spec_t(
tunnel_metadata_outer_rmac_group=outer_rmac_group,
l2_metadata_lkp_mac_da=macAddr_to_string(rmac))
client.outer_rmac_table_add_with_set_outer_rmac_hit_flag(
sess_hdl, dev_tgt,
match_spec)
def add_ports(client, sess_hdl, dev_tgt, port_count):
count = 1
while (count <= port_count):
match_spec = dc_example_port_mapping_match_spec_t(standard_metadata_ingress_port=count)
action_spec = dc_example_set_ifindex_action_spec_t(
action_ifindex=count,
action_if_label=0)
client.port_mapping_table_add_with_set_ifindex(
sess_hdl, dev_tgt,
match_spec, action_spec)
action_spec = dc_example_set_lag_port_action_spec_t(
action_port=count)
mbr_hdl = client.lag_action_profile_add_member_with_set_lag_port(
sess_hdl, dev_tgt,
action_spec)
match_spec = dc_example_lag_group_match_spec_t(
l2_metadata_egress_ifindex=count)
client.lag_group_add_entry(
sess_hdl, dev_tgt,
match_spec, mbr_hdl)
count = count + 1
def program_outer_vlan(client, sess_hdl, dev_tgt, vlan, port, v4_enabled, v6_enabled, outer_rmac):
action_spec = dc_example_set_bd_action_spec_t(
action_outer_vlan_bd=vlan,
action_vrf=vrf,
action_rmac_group=outer_rmac,
action_ipv4_unicast_enabled=v4_enabled,
action_ipv6_unicast_enabled=v6_enabled,
action_stp_group=0)
mbr_hdl = client.outer_bd_action_profile_add_member_with_set_bd(
sess_hdl, dev_tgt,
action_spec)
match_spec = dc_example_port_vlan_mapping_match_spec_t(
l2_metadata_ifindex=port,
vlan_tag__0__valid=0,
vlan_tag__0__vid=0,
vlan_tag__1__valid=0,
vlan_tag__1__vid=0)
client.port_vlan_mapping_add_entry(
sess_hdl, dev_tgt,
match_spec, mbr_hdl)
def program_inner_vlan(client, sess_hdl, dev_tgt, vlan, v4_enabled, v6_enabled, inner_rmac, uuc_mc_index):
match_spec = dc_example_bd_match_spec_t(
l2_metadata_bd=vlan)
action_spec = dc_example_set_bd_info_action_spec_t(
action_vrf=vrf,
action_rmac_group=inner_rmac,
action_bd_label=0,
action_uuc_mc_index=uuc_mc_index,
action_umc_mc_index=0,
action_bcast_mc_index=0,
action_ipv4_unicast_enabled=v4_enabled,
action_ipv6_unicast_enabled=v6_enabled,
action_igmp_snooping_enabled=0,
action_mld_snooping_enabled=0,
action_stp_group=0)
client.bd_table_add_with_set_bd_info(
sess_hdl, dev_tgt,
match_spec, action_spec)
def program_tunnel_vlan(client, sess_hdl, dev_tgt, vlan, port, vni, ttype, v4_enabled, inner_rmac):
match_spec = dc_example_tunnel_match_spec_t(
tunnel_metadata_tunnel_vni=vni,
tunnel_metadata_ingress_tunnel_type=ttype,
inner_ipv4_valid=1)
action_spec = dc_example_terminate_tunnel_inner_ipv4_action_spec_t(
action_bd=vlan,
action_vrf=vrf,
action_rmac_group=inner_rmac,
action_bd_label=0,
action_uuc_mc_index=0,
action_umc_mc_index=0,
action_bcast_mc_index=0,
action_ipv4_unicast_enabled=v4_enabled,
action_igmp_snooping_enabled=0)
client.tunnel_table_add_with_terminate_tunnel_inner_ipv4(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_mac(client, sess_hdl, dev_tgt, vlan, mac, port):
match_spec = dc_example_dmac_match_spec_t(
l2_metadata_lkp_mac_da=macAddr_to_string(mac),
l2_metadata_bd=vlan)
action_spec = dc_example_dmac_hit_action_spec_t(
action_ifindex=port)
client.dmac_table_add_with_dmac_hit(
sess_hdl, dev_tgt,
match_spec, action_spec, 0)
match_spec = dc_example_smac_match_spec_t(
l2_metadata_lkp_mac_sa=macAddr_to_string(mac),
l2_metadata_bd=vlan)
action_spec = dc_example_smac_hit_action_spec_t(
action_ifindex=port)
client.smac_table_add_with_smac_hit(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_mac_with_nexthop(client, sess_hdl, dev_tgt, vlan, mac, port, nhop):
match_spec = dc_example_dmac_match_spec_t(
l2_metadata_lkp_mac_da=macAddr_to_string(mac),
l2_metadata_bd=vlan)
action_spec = dc_example_dmac_redirect_nexthop_action_spec_t(
action_nexthop_index=nhop)
client.dmac_table_add_with_dmac_redirect_nexthop(
sess_hdl, dev_tgt,
match_spec, action_spec, 0)
match_spec = dc_example_smac_match_spec_t(
l2_metadata_lkp_mac_sa=macAddr_to_string(mac),
l2_metadata_bd=vlan)
action_spec = dc_example_smac_hit_action_spec_t(
action_ifindex=port)
client.smac_table_add_with_smac_hit(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_v4_route(client, sess_hdl, dev_tgt, vrf, ip, prefix, nhop):
if prefix == 32:
match_spec = dc_example_ipv4_fib_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_da=ip)
action_spec = dc_example_fib_hit_nexthop_action_spec_t(
action_nexthop_index=nhop)
client.ipv4_fib_table_add_with_fib_hit_nexthop(
sess_hdl, dev_tgt,
match_spec, action_spec)
else:
match_spec = dc_example_ipv4_fib_lpm_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_da=ip,
ipv4_metadata_lkp_ipv4_da_prefix_length=prefix)
action_spec = dc_example_fib_hit_nexthop_action_spec_t(
action_nexthop_index=nhop)
client.ipv4_fib_lpm_table_add_with_fib_hit_nexthop(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_v6_route(client, sess_hdl, dev_tgt, vrf, ip, prefix, nhop):
if ipv6_enabled == 0:
return
if prefix == 128:
match_spec = dc_example_ipv6_fib_match_spec_t(
l3_metadata_vrf=vrf,
ipv6_metadata_lkp_ipv6_da=ipv6Addr_to_string(ip))
action_spec = dc_example_fib_hit_nexthop_action_spec_t(
action_nexthop_index=nhop)
client.ipv6_fib_table_add_with_fib_hit_nexthop(
sess_hdl, dev_tgt,
match_spec, action_spec)
else:
match_spec = dc_example_ipv6_fib_lpm_match_spec_t(
l3_metadata_vrf=vrf,
ipv6_metadata_lkp_ipv6_da=ip,
ipv6_metadata_lkp_ipv6_da_prefix_length=prefix)
action_spec = dc_example_fib_hit_nexthop_action_spec_t(
action_nexthop_index=nhop)
client.ipv6_fib_lpm_table_add_with_fib_hit_nexthop(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_nexthop(client, sess_hdl, dev_tgt, nhop, vlan, ifindex):
match_spec = dc_example_nexthop_match_spec_t(
l3_metadata_nexthop_index=nhop)
action_spec = dc_example_set_nexthop_details_action_spec_t(
action_ifindex=ifindex,
action_bd=vlan)
client.nexthop_table_add_with_set_nexthop_details(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_v4_unicast_rewrite(client, sess_hdl, dev_tgt, nhop, dmac):
match_spec = dc_example_rewrite_match_spec_t(
l3_metadata_nexthop_index=nhop)
action_spec = dc_example_set_ipv4_unicast_rewrite_action_spec_t(
action_smac_idx=rewrite_index,
action_dmac=macAddr_to_string(dmac))
client.rewrite_table_add_with_set_ipv4_unicast_rewrite(
sess_hdl, dev_tgt,
match_spec, action_spec)
def add_v6_unicast_rewrite(client, sess_hdl, dev_tgt, nhop, dmac):
if ipv6_enabled == 0:
return
match_spec = dc_example_rewrite_match_spec_t(
l3_metadata_nexthop_index=nhop)
action_spec = dc_example_set_ipv6_unicast_rewrite_action_spec_t(
action_smac_idx=rewrite_index,
action_dmac=macAddr_to_string(dmac))
client.rewrite_table_add_with_set_ipv6_unicast_rewrite(
sess_hdl, dev_tgt,
match_spec, action_spec)
def enable_learning(client, sess_hdl, dev_tgt):
match_spec = dc_example_learn_notify_match_spec_t(
l2_metadata_l2_src_miss=1,
l2_metadata_l2_src_miss_mask=1,
l2_metadata_l2_src_move=0,
l2_metadata_l2_src_move_mask=0,
l2_metadata_stp_state=0,
l2_metadata_stp_state_mask=0)
client.learn_notify_table_add_with_generate_learn_notify(
sess_hdl, dev_tgt,
match_spec, 1000)
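#Editor-added sketch (hedged, illustrative only): the test cases below all
#follow the same recipe built from the helpers above -- clean state, install
#the default and init entries, create ports, program the outer/inner vlan
#tables, then add MAC/route/nexthop/rewrite entries before injecting packets:
#    sess_hdl = self.conn_mgr.client_init(16)
#    dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
#    self.client.clean_all(sess_hdl, dev_tgt)
#    populate_default_entries(self.client, sess_hdl, dev_tgt)
#    populate_init_entries(self.client, sess_hdl, dev_tgt)
#    add_ports(self.client, sess_hdl, dev_tgt, 2)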
#Basic L2 Test case
class L2Test(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
print
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
vlan=10
port1=1
port2=2
v4_enabled=0
v6_enabled=0
#Add ports to vlan
#Outer vlan table programs (port, vlan) mapping and derives the bd
#Inner vlan table derives the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port1, v4_enabled, v6_enabled, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port2, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan, v4_enabled, v6_enabled, 0, 0)
#Add static macs to ports. (vlan, mac -> port)
add_mac(self.client, sess_hdl, dev_tgt, vlan, '00:11:11:11:11:11', 1)
add_mac(self.client, sess_hdl, dev_tgt, vlan, '00:22:22:22:22:22', 2)
print "Sending packet port 1 -> port 2 on vlan 10 (192.168.0.1 -> 10.0.0.1 [id = 101])"
pkt = simple_tcp_packet(eth_dst='00:22:22:22:22:22',
eth_src='00:11:11:11:11:11',
ip_dst='10.0.0.1',
ip_src='192.168.0.1',
ip_id=101,
ip_ttl=64,
ip_ihl=5)
self.dataplane.send(1, str(pkt))
verify_packets(self, pkt, [2])
#Basic L3 Test case
class L3Ipv4Test(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
print
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
vlan1=10
vlan2=11
port1=1
port2=2
v4_enabled=1
v6_enabled=0
#For every L3 port, an implicit vlan will be allocated
#Add ports to vlan
#Outer vlan table programs (port, vlan) mapping and derives the bd
#Inner vlan table derives the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan1, port1, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan1, v4_enabled, v6_enabled, inner_rmac_group, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan2, port2, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan2, v4_enabled, v6_enabled, inner_rmac_group, 0)
#Create nexthop
nhop1=1
add_nexthop(self.client, sess_hdl, dev_tgt, nhop1, vlan1, port1)
#Add rewrite information (ARP info)
add_v4_unicast_rewrite(self.client, sess_hdl, dev_tgt, nhop1, '00:11:11:11:11:11')
#Add route
add_v4_route(self.client, sess_hdl, dev_tgt, vrf, 0x0a0a0a01, 32, nhop1)
#Create nexthop
nhop2=2
add_nexthop(self.client, sess_hdl, dev_tgt, nhop2, vlan2, port2)
#Add rewrite information (ARP info)
add_v4_unicast_rewrite(self.client, sess_hdl, dev_tgt, nhop2, '00:22:22:22:22:22')
#Add route
add_v4_route(self.client, sess_hdl, dev_tgt, vrf, 0x14141401, 32, nhop2)
print "Sending packet port 1 -> port 2 (10.10.10.1 -> 20.20.20.1 [id = 101])"
pkt = simple_tcp_packet(eth_dst='00:33:33:33:33:33',
eth_src='00:11:11:11:11:11',
ip_dst='20.20.20.1',
ip_src='10.10.10.1',
ip_id=101,
ip_ttl=64,
ip_ihl=5)
exp_pkt = simple_tcp_packet(eth_dst='00:22:22:22:22:22',
eth_src='00:33:33:33:33:33',
ip_dst='20.20.20.1',
ip_src='10.10.10.1',
ip_id=101,
ip_ttl=63,
ip_ihl=5)
self.dataplane.send(1, str(pkt))
verify_packets(self, exp_pkt, [2])
class L3Ipv6Test(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
print
if ipv6_enabled == 0:
print "ipv6 not enabled"
return
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
vlan1=10
vlan2=11
port1=1
port2=2
v4_enabled=0
v6_enabled=1
#For every L3 port, an implicit vlan will be allocated
#Add ports to vlan
#Outer vlan table programs (port, vlan) mapping and derives the bd
#Inner vlan table derives the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan1, port1, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan1, v4_enabled, v6_enabled, inner_rmac_group, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan2, port2, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan2, v4_enabled, v6_enabled, inner_rmac_group, 0)
#Create nexthop
nhop1=1
add_nexthop(self.client, sess_hdl, dev_tgt, nhop1, vlan1, port1)
#Add rewrite information (ARP info)
add_v6_unicast_rewrite(self.client, sess_hdl, dev_tgt, nhop1, '00:11:11:11:11:11')
#Add route
add_v6_route(self.client, sess_hdl, dev_tgt, vrf, '2000::1', 128, nhop1)
#Create nexthop
nhop2=2
add_nexthop(self.client, sess_hdl, dev_tgt, nhop2, vlan2, port2)
#Add rewrite information (ARP info)
add_v6_unicast_rewrite(self.client, sess_hdl, dev_tgt, nhop2, '00:22:22:22:22:22')
#Add route
add_v6_route(self.client, sess_hdl, dev_tgt, vrf, '3000::1', 128, nhop2)
print "Sending packet port 1 -> port 2 (10.10.10.1 -> 20.20.20.1 [id = 101])"
pkt = simple_tcpv6_packet(eth_dst='00:33:33:33:33:33',
eth_src='00:11:11:11:11:11',
ipv6_dst='3000::1',
ipv6_src='2000::1',
ipv6_hlim=64)
exp_pkt = simple_tcpv6_packet(eth_dst='00:22:22:22:22:22',
eth_src='00:33:33:33:33:33',
ipv6_dst='3000::1',
ipv6_src='2000::1',
ipv6_hlim=63)
self.dataplane.send(1, str(pkt))
verify_packets(self, exp_pkt, [2])
#Basic Vxlan Tunneling Test case
class L2VxlanTunnelTest(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
print
if tunnel_enabled == 0:
print "tunnel not enabled"
return
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
port1=1
port2=2
outer_v4_enabled=1
inner_v4_enabled=0
outer_v6_enabled=0
inner_v6_enabled=0
core_vlan=10
tenant_vlan=1000
vnid=0x1234
#Indicates vxlan tunnel in Parser
tunnel_type=1
        #Port2 belongs to the core vlan
        #Outer vlan table will derive the core bd; the src vtep, dest vtep and vnid will derive the tenant bd
program_outer_vlan(self.client, sess_hdl, dev_tgt, core_vlan, port2, outer_v4_enabled, outer_v6_enabled, outer_rmac_group)
program_tunnel_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan, port2, vnid, tunnel_type, inner_v4_enabled, 0)
        #Port1 belongs to the tenant vlan
        #Outer vlan table will derive the tenant bd and the inner bd table will derive the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan, port1, inner_v4_enabled, inner_v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan, inner_v4_enabled, inner_v6_enabled, 0, 0)
#Ingress Tunnel Decap - src vtep entry
match_spec = dc_example_ipv4_src_vtep_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_sa=0x0a0a0a02)
action_spec = dc_example_set_tunnel_lif_action_spec_t(
action_lif=0)
self.client.ipv4_src_vtep_table_add_with_set_tunnel_lif(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Ingress Tunnel Decap - dest vtep entry
match_spec = dc_example_ipv4_dest_vtep_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_da=0x0a0a0a01,
l3_metadata_lkp_ip_proto=17,
l3_metadata_lkp_l4_dport=4789)
self.client.ipv4_dest_vtep_table_add_with_set_tunnel_termination_flag(
sess_hdl, dev_tgt,
match_spec)
#Add static macs to ports. (vlan, mac -> port)
        #Nexthop should be created during mac lookup when the destination interface is a tunnel.
        #The allocated nexthop derives the egress bd at ingress and the rewrite info
        # at egress
nhop=1
add_mac(self.client, sess_hdl, dev_tgt, tenant_vlan, '00:11:11:11:11:11', port1)
add_mac_with_nexthop(self.client, sess_hdl, dev_tgt, tenant_vlan, '00:22:22:22:22:22', port2, nhop)
#add nexthop table
add_nexthop(self.client, sess_hdl, dev_tgt, nhop, tenant_vlan, port2)
#Egress Tunnel Encap - Rewrite information
match_spec = dc_example_rewrite_match_spec_t(
l3_metadata_nexthop_index=nhop)
action_spec = dc_example_set_ipv4_vxlan_rewrite_action_spec_t(
action_outer_bd=core_vlan,
action_tunnel_src_index=0,
action_tunnel_dst_index=0,
action_smac_idx=rewrite_index,
action_dmac=macAddr_to_string('00:55:55:55:55:55'))
self.client.rewrite_table_add_with_set_ipv4_vxlan_rewrite(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Add tunnel header based on tunnel type
match_spec = dc_example_tunnel_rewrite_match_spec_t(
tunnel_metadata_egress_tunnel_type=tunnel_type,
ipv4_valid=1,
tcp_valid=1,
udp_valid=0)
self.client.tunnel_rewrite_table_add_with_ipv4_vxlan_inner_ipv4_tcp_rewrite(
sess_hdl, dev_tgt,
match_spec)
#Egress Tunnel Encap - Source IP rewrite
match_spec = dc_example_tunnel_src_rewrite_match_spec_t(
tunnel_metadata_tunnel_src_index=0)
action_spec = dc_example_rewrite_tunnel_ipv4_src_action_spec_t(
action_ip=0x0a0a0a01)
self.client.tunnel_src_rewrite_table_add_with_rewrite_tunnel_ipv4_src(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Destination IP rewrite
match_spec = dc_example_tunnel_dst_rewrite_match_spec_t(
tunnel_metadata_tunnel_dst_index=0)
action_spec = dc_example_rewrite_tunnel_ipv4_dst_action_spec_t(
action_ip=0x0a0a0a02)
self.client.tunnel_dst_rewrite_table_add_with_rewrite_tunnel_ipv4_dst(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Derive vnid from egress bd mapping
match_spec = dc_example_egress_bd_map_match_spec_t(
l2_metadata_egress_bd=tenant_vlan)
action_spec = dc_example_set_egress_bd_properties_action_spec_t(
action_vnid=0x1234)
self.client.egress_bd_map_table_add_with_set_egress_bd_properties(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Decap - Decapsulate the vxlan header
match_spec = dc_example_tunnel_decap_match_spec_t(
tunnel_metadata_ingress_tunnel_type=tunnel_type,
inner_ipv4_valid=1,
inner_tcp_valid=1,
inner_udp_valid=0)
self.client.tunnel_decap_table_add_with_decapsulate_vxlan_packet_inner_ipv4_tcp(
sess_hdl, dev_tgt,
match_spec)
print "Sending packet port 1 -> port 2 - Vxlan tunnel encap"
print "Inner packet (192.168.10.1 -> 192.168.20.2 [id = 101])"
print "Outer packet (10.10.10.1 -> 10.10.10.2 [vnid = 0x1234, id = 101])"
pkt = simple_tcp_packet(eth_dst='00:22:22:22:22:22',
eth_src='00:11:11:11:11:11',
ip_dst='192.168.10.2',
ip_src='192.168.10.1',
ip_id=101,
ip_ttl=64)
vxlan_pkt = simple_vxlan_packet(
eth_dst='00:55:55:55:55:55',
eth_src='00:33:33:33:33:33',
ip_id=0,
ip_dst='10.10.10.2',
ip_src='10.10.10.1',
ip_ttl=64,
udp_sport=4966,
with_udp_chksum=False,
vxlan_vni=0x1234,
inner_frame=pkt)
self.dataplane.send(1, str(pkt))
verify_packets(self, vxlan_pkt, [2])
print "Sending packet port 2 -> port 1 - Vxlan tunnel decap"
print "Inner packet (192.168.10.2 -> 192.168.20.1 [id = 101])"
print "Outer packet (10.10.10.2 -> 10.10.10.1 [vnid = 0x1234, id = 101])"
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='192.168.10.1',
ip_src='192.168.10.2',
ip_id=101,
ip_ttl=64)
vxlan_pkt = simple_vxlan_packet(
eth_dst='00:33:33:33:33:33',
eth_src='00:55:55:55:55:55',
ip_id=0,
ip_dst='10.10.10.1',
ip_src='10.10.10.2',
ip_ttl=63,
udp_sport=4966,
with_udp_chksum=False,
vxlan_vni=0x1234,
inner_frame=pkt)
self.dataplane.send(2, str(vxlan_pkt))
verify_packets(self, pkt, [1])
class L3VxlanTunnelTest(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
print
if tunnel_enabled == 0:
print "tunnel not enabled"
return
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
port1=1
port2=2
outer_v4_enabled=1
inner_v4_enabled=1
outer_v6_enabled=0
inner_v6_enabled=0
core_vlan=10
tenant_vlan1=1000
tenant_vlan2=2000
vnid=0x1234
#Indicates vxlan tunnel in Parser
tunnel_type=1
        #Port2 belongs to the core vlan
        #Outer vlan table will derive the core bd; the src vtep, dest vtep and vnid will derive the tenant bd
program_outer_vlan(self.client, sess_hdl, dev_tgt, core_vlan, port2, outer_v4_enabled, outer_v6_enabled, outer_rmac_group)
program_tunnel_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan2, port2, vnid, tunnel_type, inner_v4_enabled, inner_rmac_group)
        #Port1 belongs to the tenant vlan
        #Outer vlan table will derive the tenant bd and the inner bd table will derive the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan1, port1, inner_v4_enabled, inner_v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, tenant_vlan1, inner_v4_enabled, inner_v6_enabled, inner_rmac_group, 0)
#Ingress Tunnel Decap - src vtep entry
match_spec = dc_example_ipv4_src_vtep_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_sa=0x0a0a0a02)
action_spec = dc_example_set_tunnel_lif_action_spec_t(
action_lif=0)
self.client.ipv4_src_vtep_table_add_with_set_tunnel_lif(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Ingress Tunnel Decap - dest vtep entry
match_spec = dc_example_ipv4_dest_vtep_match_spec_t(
l3_metadata_vrf=vrf,
ipv4_metadata_lkp_ipv4_da=0x0a0a0a01,
l3_metadata_lkp_ip_proto=17,
l3_metadata_lkp_l4_dport=4789)
self.client.ipv4_dest_vtep_table_add_with_set_tunnel_termination_flag(
sess_hdl, dev_tgt,
match_spec)
#Add L3 routes
nhop1=1
nhop2=2
add_v4_route(self.client, sess_hdl, dev_tgt, vrf, 0x0aa80a01, 32, nhop1)
add_v4_route(self.client, sess_hdl, dev_tgt, vrf, 0x0aa80b01, 32, nhop2)
#Add nexthop table
add_nexthop(self.client, sess_hdl, dev_tgt, nhop1, tenant_vlan1, port1)
add_nexthop(self.client, sess_hdl, dev_tgt, nhop2, tenant_vlan2, port2)
#Egress Tunnel Encap - Rewrite information
match_spec = dc_example_rewrite_match_spec_t(
l3_metadata_nexthop_index=nhop2)
action_spec = dc_example_set_ipv4_vxlan_rewrite_action_spec_t(
action_outer_bd=core_vlan,
action_tunnel_src_index=0,
action_tunnel_dst_index=0,
action_smac_idx=rewrite_index,
action_dmac=macAddr_to_string('00:55:55:55:55:55'))
self.client.rewrite_table_add_with_set_ipv4_vxlan_rewrite(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Add tunnel header based on tunnel type
match_spec = dc_example_tunnel_rewrite_match_spec_t(
tunnel_metadata_egress_tunnel_type=tunnel_type,
ipv4_valid=1,
tcp_valid=1,
udp_valid=0)
self.client.tunnel_rewrite_table_add_with_ipv4_vxlan_inner_ipv4_tcp_rewrite(
sess_hdl, dev_tgt,
match_spec)
#Egress Tunnel Encap - Source IP rewrite
match_spec = dc_example_tunnel_src_rewrite_match_spec_t(
tunnel_metadata_tunnel_src_index=0)
action_spec = dc_example_rewrite_tunnel_ipv4_src_action_spec_t(
action_ip=0x0a0a0a01)
self.client.tunnel_src_rewrite_table_add_with_rewrite_tunnel_ipv4_src(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Destination IP rewrite
match_spec = dc_example_tunnel_dst_rewrite_match_spec_t(
tunnel_metadata_tunnel_dst_index=0)
action_spec = dc_example_rewrite_tunnel_ipv4_dst_action_spec_t(
action_ip=0x0a0a0a02)
self.client.tunnel_dst_rewrite_table_add_with_rewrite_tunnel_ipv4_dst(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Encap - Derive vnid from egress bd mapping
match_spec = dc_example_egress_bd_map_match_spec_t(
l2_metadata_egress_bd=tenant_vlan2)
action_spec = dc_example_set_egress_bd_properties_action_spec_t(
action_vnid=0x1234)
self.client.egress_bd_map_table_add_with_set_egress_bd_properties(
sess_hdl, dev_tgt,
match_spec, action_spec)
#Egress Tunnel Decap - Decapsulate the vxlan header
match_spec = dc_example_tunnel_decap_match_spec_t(
tunnel_metadata_ingress_tunnel_type=tunnel_type,
inner_ipv4_valid=1,
inner_tcp_valid=1,
inner_udp_valid=0)
self.client.tunnel_decap_table_add_with_decapsulate_vxlan_packet_inner_ipv4_tcp(
sess_hdl, dev_tgt,
match_spec)
print "Sending packet port 1 -> port 2 - Vxlan tunnel encap"
print "Inner packet (10.168.10.1 -> 10.168.11.1 [id = 101])"
print "Outer packet (10.10.10.1 -> 10.10.10.2 [vnid = 0x1234, id = 101])"
pkt1 = simple_tcp_packet(eth_dst='00:33:33:33:33:33',
eth_src='00:11:11:11:11:11',
ip_dst='10.168.11.1',
ip_src='10.168.10.1',
ip_id=101,
ip_ttl=64)
pkt2 = simple_tcp_packet(eth_dst='00:33:33:33:33:33',
eth_src='00:11:11:11:11:11',
ip_dst='10.168.11.1',
ip_src='10.168.10.1',
ip_id=101,
ip_ttl=63)
vxlan_pkt = simple_vxlan_packet(
eth_dst='00:55:55:55:55:55',
eth_src='00:33:33:33:33:33',
ip_id=0,
ip_dst='10.10.10.2',
ip_src='10.10.10.1',
ip_ttl=63,
udp_sport=14479,
with_udp_chksum=False,
vxlan_vni=0x1234,
inner_frame=pkt2)
self.dataplane.send(1, str(pkt1))
verify_packets(self, vxlan_pkt, [2])
print "Sending packet port 2 -> port 1 - Vxlan tunnel decap"
print "Inner packet (10.168.11.1 -> 10.168.10.1 [id = 101])"
print "Outer packet (10.10.10.2 -> 10.10.10.1 [vnid = 0x1234, id = 101])"
pkt = simple_tcp_packet(eth_dst='00:33:33:33:33:33',
eth_src='00:22:22:22:22:22',
ip_dst='10.168.10.1',
ip_src='10.168.11.1',
ip_id=101,
ip_ttl=64)
vxlan_pkt = simple_vxlan_packet(
eth_dst='00:33:33:33:33:33',
eth_src='00:55:55:55:55:55',
ip_id=0,
ip_dst='10.10.10.1',
ip_src='10.10.10.2',
ip_ttl=64,
udp_sport=14479,
with_udp_chksum=False,
vxlan_vni=0x1234,
inner_frame=pkt)
self.dataplane.send(2, str(vxlan_pkt))
verify_packets(self, pkt, [1])
class L2LearningTest(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 2)
vlan=10
port1=1
port2=2
v4_enabled=0
v6_enabled=0
#Add ports to vlan
#Outer vlan table programs (port, vlan) mapping and derives the bd
#Inner vlan table derives the bd state
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port1, v4_enabled, v6_enabled, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port2, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan, v4_enabled, v6_enabled, 0, 0)
enable_learning(self.client, sess_hdl, dev_tgt)
self.client.set_learning_timeout(sess_hdl, 0, learn_timeout * 1000)
self.client.mac_learn_digest_register(sess_hdl, 0)
pkt = simple_tcp_packet(eth_dst='00:44:44:44:44:44',
eth_src='00:22:22:22:22:22',
ip_dst='10.168.10.1',
ip_src='10.168.11.1',
ip_id=101,
ip_ttl=64)
self.dataplane.send(1, str(pkt))
time.sleep(learn_timeout + 1)
digests = self.client.mac_learn_digest_get_digest(sess_hdl)
assert len(digests.msg) == 1
print "new mac learnt ",
for b in string_to_bytes(digests.msg[0].l2_metadata_lkp_mac_sa):
print("%02x:" % (b)),
print "on port ", digests.msg[0].l2_metadata_ifindex
self.client.mac_learn_digest_digest_notify_ack(sess_hdl, digests.msg_ptr)
self.client.mac_learn_digest_deregister(sess_hdl, 0)
class L2FloodTest(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "dc_example")
def runTest(self):
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
print "Cleaning state"
self.client.clean_all(sess_hdl, dev_tgt)
#Add the default entries
populate_default_entries(self.client, sess_hdl, dev_tgt)
populate_init_entries(self.client, sess_hdl, dev_tgt)
#Create two ports
add_ports(self.client, sess_hdl, dev_tgt, 4)
vlan=10
port1=1
port2=2
port3=3
port4=4
v4_enabled=0
v6_enabled=0
mgid = 0x100
rid = 0x200
#Add ports to vlan
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port1, v4_enabled, v6_enabled, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port2, v4_enabled, v6_enabled, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port3, v4_enabled, v6_enabled, 0)
program_outer_vlan(self.client, sess_hdl, dev_tgt, vlan, port4, v4_enabled, v6_enabled, 0)
program_inner_vlan(self.client, sess_hdl, dev_tgt, vlan, v4_enabled, v6_enabled, 0, mgid)
port_map = [0] * 32
lag_map = [0] * 32
port_map[0] = (1 << port1) + (1 << port2) + (1 << port3) + (1 << port4)
mgrp_hdl = self.mc.mc_mgrp_create(sess_hdl, dev_tgt, mgid)
l1_hdl = self.mc.mc_l1_node_create(sess_hdl, dev_tgt, rid)
self.mc.mc_l1_associate_node(sess_hdl, dev_tgt, mgrp_hdl, l1_hdl)
l2_hdl = self.mc.mc_l2_node_create(sess_hdl, dev_tgt, l1_hdl, port_map, lag_map)
pkt = simple_tcp_packet(eth_dst='00:44:44:44:44:44',
eth_src='00:22:22:22:22:22',
ip_dst='10.168.10.1',
ip_src='10.168.11.1',
ip_id=101,
ip_ttl=64)
self.dataplane.send(1, str(pkt))
verify_packets(self, pkt, [port2, port3, port4])
time.sleep(1)
self.mc.mc_l2_node_destroy(sess_hdl, dev_tgt, l2_hdl)
self.mc.mc_l1_node_destroy(sess_hdl, dev_tgt, l1_hdl)
self.mc.mc_mgrp_destroy(sess_hdl, dev_tgt, mgrp_hdl)
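#Editor-added note (hedged): in L2FloodTest the flood set is a bitmap over
#ports, so with port1..port4 = 1..4 the value
#    (1 << 1) + (1 << 2) + (1 << 3) + (1 << 4) == 0x1e
#marks ports 1-4 in port_map[0] while lag_map stays empty; the multicast tree
#is built as mgrp -> l1 node (rid) -> l2 node (port/lag maps) and torn down in
#reverse order after the flood is verified.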
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classifier class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.session_bundle import exporter
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `dict` of `Tensor`s.
Returns:
Tuple of default classification signature and empty named signatures.
"""
signature = exporter.classification_signature(
examples,
classes_tensor=predictions[Classifier.CLASS_OUTPUT],
scores_tensor=predictions[Classifier.PROBABILITY_OUTPUT])
return signature, {}
def _get_classifier_metrics(unused_n_classes):
return {
('accuracy', 'classes'): metrics_lib.streaming_accuracy
}
class Classifier(estimator.Estimator):
"""Classifier single output Estimator.
  Given a logits-generating function, provides class / probabilities heads and
functions to work with them.
"""
CLASS_OUTPUT = 'classes'
PROBABILITY_OUTPUT = 'probabilities'
def __init__(self, model_fn, n_classes, model_dir=None, config=None,
params=None):
"""Constructor for Classifier.
Args:
      model_fn: (features, targets, mode) -> logits, loss, train_op
n_classes: Number of classes
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object (optional)
params: `dict` of hyper parameters that will be passed into `model_fn`.
"""
self._n_classes = n_classes
self._logits_fn = model_fn
if params:
model_fn = self._classifier_model_with_params
else:
model_fn = self._classifier_model
super(Classifier, self).__init__(model_fn=model_fn,
model_dir=model_dir, config=config,
params=params)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None):
"""Evaluates given model with provided evaluation data.
See superclass Estimator for more details.
Args:
x: features.
y: targets.
input_fn: Input function.
feed_fn: Function creating a feed dict every time it is called.
batch_size: minibatch size to use on the input.
steps: Number of steps for which to evaluate model.
metrics: Dict of metric ops to run. If None, the default metrics are used.
name: Name of the evaluation.
Returns:
Returns `dict` with evaluation results.
"""
metrics = metrics or _get_classifier_metrics(self._n_classes)
return super(Classifier, self).evaluate(x=x,
y=y,
input_fn=input_fn,
batch_size=batch_size,
steps=steps,
metrics=metrics,
name=name)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=False):
"""Returns predicted classes for given features.
Args:
      x: Matrix of shape [n_samples, n_features...]. Can be an iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes (or an iterable of predicted classes if
as_iterable is True).
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
predictions = super(Classifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable,
outputs=[Classifier.CLASS_OUTPUT])
if as_iterable:
return (p[Classifier.CLASS_OUTPUT] for p in predictions)
else:
return predictions[Classifier.CLASS_OUTPUT]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, as_iterable=False):
"""Returns predicted probabilty distributions for given features.
Args:
      x: Matrix of shape [n_samples, n_features...]. Can be an iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted probability distributions (or an iterable of
predicted probability distributions if as_iterable is True).
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
predictions = super(Classifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable,
outputs=[Classifier.PROBABILITY_OUTPUT])
if as_iterable:
return (p[Classifier.PROBABILITY_OUTPUT] for p in predictions)
else:
return predictions[Classifier.PROBABILITY_OUTPUT]
def _classifier_model(self, features, targets, mode):
return self._convert_to_estimator_model_result(
self._logits_fn(features, targets, mode))
def _classifier_model_with_params(self, features, targets, mode, params):
return self._convert_to_estimator_model_result(
self._logits_fn(features, targets, mode, params))
def _convert_to_estimator_model_result(self, logits_fn_result):
logits, loss, train_op = logits_fn_result
return {
Classifier.CLASS_OUTPUT:
math_ops.argmax(logits, len(logits.get_shape()) - 1),
Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
}, loss, train_op
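# Editor-added usage sketch (hedged; not part of the original module). A
# model_fn for Classifier follows (features, targets, mode) ->
# (logits, loss, train_op); the contrib helpers named below are assumptions
# for illustration only:
#
#   def my_model_fn(features, targets, mode):
#     logits = tf.contrib.layers.fully_connected(features, num_outputs=3)
#     loss = tf.contrib.losses.softmax_cross_entropy(logits, targets)
#     train_op = tf.contrib.layers.optimize_loss(
#         loss, tf.contrib.framework.get_global_step(),
#         learning_rate=0.1, optimizer='SGD')
#     return logits, loss, train_op
#
#   classifier = Classifier(model_fn=my_model_fn, n_classes=3)
#   classifier.fit(x=train_x, y=train_y, steps=100)
#   predicted_classes = classifier.predict(x=test_x)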
|
|
from __future__ import absolute_import
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from ._segmentation import _ve_step, _interaction_energy
NITERS = 10
NGB_SIZE = 26
BETA = 0.1
nonzero = lambda x: np.maximum(x, 1e-50)
log = lambda x: np.log(nonzero(x))
class Segmentation(object):
def __init__(self, data, mask=None, mu=None, sigma=None,
ppm=None, prior=None, U=None,
ngb_size=NGB_SIZE, beta=BETA):
"""
Class for multichannel Markov random field image segmentation
using the variational EM algorithm. For details regarding the
underlying algorithm, see:
Roche et al, 2011. On the convergence of EM-like algorithms
for image segmentation using Markov random fields. Medical
Image Analysis (DOI: 10.1016/j.media.2011.05.002).
Parameters
----------
data : array-like
Input image array
mask : array-like or tuple of array
Input mask to restrict the segmentation
beta : float
Markov regularization parameter
mu : array-like
Initial class-specific means
sigma : array-like
Initial class-specific variances
"""
data = data.squeeze()
if not len(data.shape) in (3, 4):
raise ValueError('Invalid input image')
if len(data.shape) == 3:
nchannels = 1
space_shape = data.shape
else:
nchannels = data.shape[-1]
space_shape = data.shape[0:-1]
self.nchannels = nchannels
        # Make default mask (required by MRF regularization). This will
        # be passed to the _ve_step C-routine, which assumes a
        # contiguous int array and raises an error otherwise. Voxels on
# the image borders are further rejected to avoid segmentation
# faults.
if mask is None:
mask = np.ones(space_shape, dtype=bool)
X, Y, Z = np.where(mask)
XYZ = np.zeros((X.shape[0], 3), dtype='intp')
XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] = X, Y, Z
self.XYZ = XYZ
self.mask = mask
self.data = data[mask]
if nchannels == 1:
self.data = np.reshape(self.data, (self.data.shape[0], 1))
# By default, the ppm is initialized as a collection of
# uniform distributions
if ppm is None:
nclasses = len(mu)
self.ppm = np.zeros(list(space_shape) + [nclasses])
self.ppm[mask] = 1. / nclasses
self.is_ppm = False
self.mu = np.array(mu, dtype='double').reshape(\
(nclasses, nchannels))
self.sigma = np.array(sigma, dtype='double').reshape(\
(nclasses, nchannels, nchannels))
elif mu is None:
nclasses = ppm.shape[-1]
self.ppm = np.asarray(ppm)
self.is_ppm = True
self.mu = np.zeros((nclasses, nchannels))
self.sigma = np.zeros((nclasses, nchannels, nchannels))
else:
raise ValueError('missing information')
self.nclasses = nclasses
if prior is not None:
self.prior = np.asarray(prior)[self.mask].reshape(\
[self.data.shape[0], nclasses])
else:
self.prior = None
self.ngb_size = int(ngb_size)
self.set_markov_prior(beta, U=U)
def set_markov_prior(self, beta, U=None):
if U is not None: # make sure it's C-contiguous
self.U = np.asarray(U).copy()
else: # Potts model
U = np.ones((self.nclasses, self.nclasses))
U[_diag_indices(self.nclasses)] = 0
self.U = U
self.beta = float(beta)
def vm_step(self, freeze=()):
classes = list(range(self.nclasses))
for i in freeze:
classes.remove(i)
for i in classes:
P = self.ppm[..., i][self.mask].ravel()
Z = nonzero(P.sum())
tmp = self.data.T * P.T
mu = tmp.sum(1) / Z
mu_ = mu.reshape((len(mu), 1))
sigma = np.dot(tmp, self.data) / Z - np.dot(mu_, mu_.T)
self.mu[i] = mu
self.sigma[i] = sigma
def log_external_field(self):
"""
Compute the logarithm of the external field, where the
external field is defined as the likelihood times the
first-order component of the prior.
"""
lef = np.zeros([self.data.shape[0], self.nclasses])
for i in range(self.nclasses):
centered_data = self.data - self.mu[i]
if self.nchannels == 1:
inv_sigma = 1. / nonzero(self.sigma[i])
norm_factor = np.sqrt(inv_sigma.squeeze())
else:
inv_sigma = np.linalg.inv(self.sigma[i])
norm_factor = 1. / np.sqrt(\
nonzero(np.linalg.det(self.sigma[i])))
maha_dist = np.sum(centered_data * np.dot(inv_sigma,
centered_data.T).T, 1)
lef[:, i] = -.5 * maha_dist
lef[:, i] += log(norm_factor)
if self.prior is not None:
lef += log(self.prior)
return lef
def normalized_external_field(self):
f = self.log_external_field().T
f -= np.max(f, 0)
np.exp(f, f)
f /= f.sum(0)
return f.T
def ve_step(self):
nef = self.normalized_external_field()
if self.beta == 0:
self.ppm[self.mask] = np.reshape(\
nef, self.ppm[self.mask].shape)
else:
self.ppm = _ve_step(self.ppm, nef, self.XYZ,
self.U, self.ngb_size, self.beta)
def run(self, niters=NITERS, freeze=()):
if self.is_ppm:
self.vm_step(freeze=freeze)
for i in range(niters):
self.ve_step()
self.vm_step(freeze=freeze)
self.is_ppm = True
def map(self):
"""
Return the maximum a posterior label map
"""
return map_from_ppm(self.ppm, self.mask)
def free_energy(self, ppm=None):
"""
Compute the free energy defined as:
        F(q, theta) = int q(x) log[ q(x) / p(x, y | theta) ] dx
associated with input parameters mu,
sigma and beta (up to an ignored constant).
"""
if ppm is None:
ppm = self.ppm
q = ppm[self.mask]
# Entropy term
lef = self.log_external_field()
f1 = np.sum(q * (log(q) - lef))
# Interaction term
if self.beta > 0.0:
f2 = self.beta * _interaction_energy(ppm, self.XYZ,
self.U, self.ngb_size)
else:
f2 = 0.0
return f1 + f2
def _diag_indices(n, ndim=2):
    # diag_indices is available in numpy 1.4 and later. This is for
    # compatibility with numpy < 1.4
idx = np.arange(n)
return (idx,) * ndim
def moment_matching(dat, mu, sigma, glob_mu, glob_sigma):
"""
Moment matching strategy for parameter initialization to feed a
segmentation algorithm.
Parameters
----------
    dat : array
Image data.
mu : array
Template class-specific intensity means
sigma : array
Template class-specific intensity variances
glob_mu : float
Template global intensity mean
glob_sigma : float
Template global intensity variance
Returns
-------
dat_mu: array
Guess of class-specific intensity means
dat_sigma: array
Guess of class-specific intensity variances
"""
dat_glob_mu = float(np.mean(dat))
dat_glob_sigma = float(np.var(dat))
a = np.sqrt(dat_glob_sigma / glob_sigma)
b = dat_glob_mu - a * glob_mu
dat_mu = a * mu + b
dat_sigma = (a ** 2) * sigma
return dat_mu, dat_sigma
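# A minimal usage sketch with toy numbers (all values hypothetical): the
# template class means/variances are rescaled by the affine map a * x + b
# that sends the template global mean/variance onto the observed global
# mean/variance of the data.
def _moment_matching_example():
    dat = np.random.RandomState(0).normal(50., 10., size=10000)
    mu = np.array([80., 100., 120.])      # template class means
    sigma = np.array([100., 100., 100.])  # template class variances
    dat_mu, dat_sigma = moment_matching(dat, mu, sigma,
                                        glob_mu=100., glob_sigma=400.)
    # a = sqrt(var(dat) / glob_sigma), b = mean(dat) - a * glob_mu, so the
    # rescaled means track the intensity range of ``dat``.
    return dat_mu, dat_sigma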
def map_from_ppm(ppm, mask=None):
x = np.zeros(ppm.shape[0:-1], dtype='uint8')
if mask is None:
mask = ppm == 0
x[mask] = ppm[mask].argmax(-1) + 1
return x
def binarize_ppm(q):
"""
Assume input ppm is masked (ndim==2)
"""
bin_q = np.zeros(q.shape)
bin_q[:q.shape[0], np.argmax(q, axis=1)] = 1
return bin_q
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
import os
import uuid
import warnings
import fixtures
import mock
from oslo_config import fixture as fixture_config
from oslotest import mockpatch
import six
from six.moves.urllib import parse as urlparse
import sqlalchemy
import testscenarios.testcase
from testtools import testcase
from ceilometer import storage
from ceilometer.tests import base as test_base
try:
from ceilometer.tests import mocks
except ImportError:
mocks = None # happybase module is not Python 3 compatible yet
class MongoDbManager(fixtures.Fixture):
def __init__(self, url):
self._url = url
def setUp(self):
super(MongoDbManager, self).setUp()
with warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
message='.*you must provide a username and password.*')
try:
self.connection = storage.get_connection(
self.url, 'ceilometer.metering.storage')
self.alarm_connection = storage.get_connection(
self.url, 'ceilometer.alarm.storage')
self.event_connection = storage.get_connection(
self.url, 'ceilometer.event.storage')
except storage.StorageBadVersion as e:
raise testcase.TestSkipped(six.text_type(e))
@property
def url(self):
return '%(url)s_%(db)s' % {
'url': self._url,
'db': uuid.uuid4().hex
}
class SQLManager(fixtures.Fixture):
def setUp(self):
super(SQLManager, self).setUp()
self.connection = storage.get_connection(
self.url, 'ceilometer.metering.storage')
self.alarm_connection = storage.get_connection(
self.url, 'ceilometer.alarm.storage')
self.event_connection = storage.get_connection(
self.url, 'ceilometer.event.storage')
@property
def url(self):
return self._url.replace('template1', self._db_name)
class PgSQLManager(SQLManager):
def __init__(self, url):
self._url = url
self._db_name = 'ceilometer_%s' % uuid.uuid4().hex
self._engine = sqlalchemy.create_engine(self._url)
self._conn = self._engine.connect()
self._conn.connection.set_isolation_level(0)
self._conn.execute(
'CREATE DATABASE %s WITH TEMPLATE template0;' % self._db_name)
self._conn.connection.set_isolation_level(1)
self._conn.close()
self._engine.dispose()
class MySQLManager(SQLManager):
def __init__(self, url):
self._url = url
self._db_name = 'ceilometer_%s' % uuid.uuid4().hex
self._engine = sqlalchemy.create_engine(
self._url.replace('template1', ''))
self._conn = self._engine.connect()
self._conn.execute('CREATE DATABASE %s;' % self._db_name)
self._conn.close()
self._engine.dispose()
class ElasticSearchManager(fixtures.Fixture):
def __init__(self, url):
self.url = url
def setUp(self):
super(ElasticSearchManager, self).setUp()
self.connection = storage.get_connection(
'sqlite://', 'ceilometer.metering.storage')
self.alarm_connection = storage.get_connection(
'sqlite://', 'ceilometer.alarm.storage')
self.event_connection = storage.get_connection(
self.url, 'ceilometer.event.storage')
# prefix each test with unique index name
self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex
# force index on write so data is queryable right away
self.event_connection._refresh_on_write = True
class HBaseManager(fixtures.Fixture):
def __init__(self, url):
self._url = url
def setUp(self):
super(HBaseManager, self).setUp()
self.connection = storage.get_connection(
self.url, 'ceilometer.metering.storage')
self.alarm_connection = storage.get_connection(
self.url, 'ceilometer.alarm.storage')
self.event_connection = storage.get_connection(
self.url, 'ceilometer.event.storage')
        # Use a unique prefix for each test so its data can be told apart,
        # because all test data is stored in a single table
data_prefix = str(uuid.uuid4().hex)
def table(conn, name):
return mocks.MockHBaseTable(name, conn, data_prefix)
        # Mock only the real HBase connection; the MConnection "table"
        # method stays as the original.
mock.patch('happybase.Connection.table', new=table).start()
        # We shouldn't delete data and tables after each test because that
        # takes too long.
        # All test tables will be deleted in setup-test-env.sh
mock.patch("happybase.Connection.disable_table",
new=mock.MagicMock()).start()
mock.patch("happybase.Connection.delete_table",
new=mock.MagicMock()).start()
mock.patch("happybase.Connection.create_table",
new=mock.MagicMock()).start()
@property
def url(self):
return '%s?table_prefix=%s&table_prefix_separator=%s' % (
self._url,
os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"),
os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_")
)
class SQLiteManager(fixtures.Fixture):
def __init__(self, url):
self.url = url
def setUp(self):
super(SQLiteManager, self).setUp()
self.connection = storage.get_connection(
self.url, 'ceilometer.metering.storage')
self.alarm_connection = storage.get_connection(
self.url, 'ceilometer.alarm.storage')
self.event_connection = storage.get_connection(
self.url, 'ceilometer.event.storage')
class TestBase(testscenarios.testcase.WithScenarios, test_base.BaseTestCase):
DRIVER_MANAGERS = {
'mongodb': MongoDbManager,
'mysql': MySQLManager,
'postgresql': PgSQLManager,
'db2': MongoDbManager,
'sqlite': SQLiteManager,
'es': ElasticSearchManager,
}
if mocks is not None:
DRIVER_MANAGERS['hbase'] = HBaseManager
db_url = 'sqlite://' # NOTE(Alexei_987) Set default db url
def setUp(self):
super(TestBase, self).setUp()
engine = urlparse.urlparse(self.db_url).scheme
# NOTE(Alexei_987) Shortcut to skip expensive db setUp
test_method = self._get_test_method()
if (hasattr(test_method, '_run_with')
and engine not in test_method._run_with):
raise testcase.TestSkipped(
'Test is not applicable for %s' % engine)
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF([], project='ceilometer', validate_default_values=True)
try:
self.db_manager = self._get_driver_manager(engine)(self.db_url)
except ValueError as exc:
self.skipTest("missing driver manager: %s" % exc)
self.useFixture(self.db_manager)
self.conn = self.db_manager.connection
self.conn.upgrade()
self.alarm_conn = self.db_manager.alarm_connection
self.alarm_conn.upgrade()
self.event_conn = self.db_manager.event_connection
self.event_conn.upgrade()
self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection',
side_effect=self._get_connection))
# Set a default location for the pipeline config file so the
# tests work even if ceilometer is not installed globally on
# the system.
self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline')
self.CONF.set_override(
'pipeline_cfg_file',
self.path_get('etc/ceilometer/pipeline.yaml')
)
def tearDown(self):
self.event_conn.clear()
self.event_conn = None
self.alarm_conn.clear()
self.alarm_conn = None
self.conn.clear()
self.conn = None
super(TestBase, self).tearDown()
def _get_connection(self, url, namespace):
if namespace == "ceilometer.alarm.storage":
return self.alarm_conn
elif namespace == "ceilometer.event.storage":
return self.event_conn
return self.conn
def _get_driver_manager(self, engine):
manager = self.DRIVER_MANAGERS.get(engine)
if not manager:
raise ValueError('No manager available for %s' % engine)
return manager
def run_with(*drivers):
"""Used to mark tests that are only applicable for certain db driver.
Skips test if driver is not available.
"""
def decorator(test):
if isinstance(test, type) and issubclass(test, TestBase):
# Decorate all test methods
for attr in dir(test):
value = getattr(test, attr)
if callable(value) and attr.startswith('test_'):
if six.PY3:
value._run_with = drivers
else:
value.__func__._run_with = drivers
else:
test._run_with = drivers
return test
return decorator
@six.add_metaclass(test_base.SkipNotImplementedMeta)
class MixinTestsWithBackendScenarios(object):
scenarios = [
('sqlite', {'db_url': 'sqlite://'}),
]
for db in ('MONGODB', 'MYSQL', 'PGSQL', 'HBASE', 'DB2', 'ES'):
if os.environ.get('CEILOMETER_TEST_%s_URL' % db):
scenarios.append(
(db.lower(), {'db_url': os.environ.get(
'CEILOMETER_TEST_%s_URL' % db)}))
scenarios_db = [db for db, _ in scenarios]
# Insert default value for hbase test
if 'hbase' not in scenarios_db:
scenarios.append(
('hbase', {'db_url': 'hbase://__test__'}))
# Insert default value for db2 test
if 'mongodb' in scenarios_db and 'db2' not in scenarios_db:
scenarios.append(
('db2', {'db_url': os.environ.get('CEILOMETER_TEST_MONGODB_URL',
'').replace('mongodb://',
'db2://')}))
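# A minimal usage sketch of ``run_with`` together with the scenarios mixin
# defined above (the test class and method are hypothetical). The class is
# created inside a function so that importing this base module does not add
# it to the test run.
def _example_driver_specific_test_case():
    class ExampleDriverSpecificTest(TestBase, MixinTestsWithBackendScenarios):

        @run_with('mongodb', 'db2')
        def test_needs_mongo_family(self):
            # Skipped automatically unless the current scenario's engine is
            # mongodb or db2.
            self.assertIsNotNone(self.conn)

    return ExampleDriverSpecificTest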
|
|
from __future__ import absolute_import, division, unicode_literals, print_function
import collections
import contextlib
import logging
import logging.handlers
import sys
import threading
import uuid
import warnings
from flexget import __version__
from flexget.utils.tools import io_encoding
# A level more detailed than DEBUG
TRACE = 5
# A level more detailed than INFO
VERBOSE = 15
# Stores `task`, logging `session_id`, and redirected `output` stream in a thread local context
local_context = threading.local()
def get_level_no(level):
if not isinstance(level, int):
# Python logging api is horrible. This is getting the level number, which is required on python 2.6.
level = logging.getLevelName(level.upper())
return level
@contextlib.contextmanager
def task_logging(task):
"""Context manager which adds task information to log messages."""
old_task = getattr(local_context, 'task', '')
local_context.task = task
try:
yield
finally:
local_context.task = old_task
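# A minimal usage sketch (the task name is hypothetical): records emitted
# inside the block carry the task name, which FlexGetLogger.makeRecord below
# attaches to each record and FlexGetFormatter prints in its task column.
def _task_logging_example():
    log = logging.getLogger('example')
    with task_logging('my_task'):
        log.info('this record is tagged with task="my_task"')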
class SessionFilter(logging.Filter):
def __init__(self, session_id):
self.session_id = session_id
def filter(self, record):
return getattr(record, 'session_id', None) == self.session_id
@contextlib.contextmanager
def capture_output(stream, loglevel=None):
"""Context manager which captures all log and console output to given `stream` while in scope."""
root_logger = logging.getLogger()
old_level = root_logger.getEffectiveLevel()
old_id = getattr(local_context, 'session_id', None)
# Keep using current, or create one if none already set
local_context.session_id = old_id or uuid.uuid4()
old_output = getattr(local_context, 'output', None)
old_loglevel = getattr(local_context, 'loglevel', None)
streamhandler = logging.StreamHandler(stream)
streamhandler.setFormatter(FlexGetFormatter())
streamhandler.addFilter(SessionFilter(local_context.session_id))
if loglevel is not None:
loglevel = get_level_no(loglevel)
streamhandler.setLevel(loglevel)
# If requested loglevel is lower than the root logger is filtering for, we need to turn it down.
# All existing handlers should have their desired level set and not be affected.
if not root_logger.isEnabledFor(loglevel):
root_logger.setLevel(loglevel)
local_context.output = stream
local_context.loglevel = loglevel
root_logger.addHandler(streamhandler)
try:
yield
finally:
root_logger.removeHandler(streamhandler)
root_logger.setLevel(old_level)
local_context.session_id = old_id
local_context.output = old_output
local_context.loglevel = old_loglevel
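# A minimal usage sketch: capture log output produced inside the block into
# an in-memory buffer, e.g. to ship it back to another process. Assumes the
# logger is created after FlexGetLogger has been installed as the logger
# class (see the bottom of this module) so records carry the session id.
def _capture_output_example():
    import io
    buf = io.StringIO()
    with capture_output(buf, loglevel='debug'):
        logging.getLogger('example').debug('this line ends up in buf')
    return buf.getvalue()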
def get_capture_stream():
"""If output is currently being redirected to a stream, returns that stream."""
return getattr(local_context, 'output', None)
def get_capture_loglevel():
"""If output is currently being redirected to a stream, returns declared loglevel for that stream."""
return getattr(local_context, 'loglevel', None)
def console(text):
"""
Print to console safely. Output is able to be captured by different streams in different contexts.
Any plugin wishing to output to the user's console should use this function instead of print so that
output can be redirected when FlexGet is invoked from another process.
"""
if not isinstance(text, str):
text = unicode(text).encode(io_encoding, 'replace')
output = getattr(local_context, 'output', sys.stdout)
print(text, file=output)
class RollingBuffer(collections.deque):
"""File-like that keeps a certain number of lines of text in memory."""
def write(self, line):
self.append(line)
class FlexGetLogger(logging.Logger):
"""Custom logger that adds trace and verbose logging methods, and contextual information to log records."""
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
extra = extra or {}
extra.update(
task=getattr(local_context, 'task', ''),
session_id=getattr(local_context, 'session_id', ''))
# Replace newlines in log messages with \n
if isinstance(msg, basestring):
msg = msg.replace('\n', '\\n')
return logging.Logger.makeRecord(self, name, level, fn, lno, msg, args, exc_info, func, extra)
def trace(self, msg, *args, **kwargs):
"""Log at TRACE level (more detailed than DEBUG)."""
self.log(TRACE, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
"""Log at VERBOSE level (displayed when FlexGet is run interactively.)"""
self.log(VERBOSE, msg, *args, **kwargs)
class FlexGetFormatter(logging.Formatter):
"""Custom formatter that can handle both regular log records and those created by FlexGetLogger"""
flexget_fmt = '%(asctime)-15s %(levelname)-8s %(name)-13s %(task)-15s %(message)s'
def __init__(self):
logging.Formatter.__init__(self, self.flexget_fmt, '%Y-%m-%d %H:%M')
def format(self, record):
if not hasattr(record, 'task'):
record.task = ''
return logging.Formatter.format(self, record)
_logging_configured = False
_buff_handler = None
_logging_started = False
# Stores the last 50 debug messages
debug_buffer = RollingBuffer(maxlen=50)
def initialize(unit_test=False):
"""Prepare logging.
"""
global _logging_configured, _logging_started, _buff_handler
if _logging_configured:
return
if 'dev' in __version__:
warnings.filterwarnings('always', category=DeprecationWarning, module='flexget.*')
warnings.simplefilter('once', append=True)
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(VERBOSE, 'VERBOSE')
_logging_configured = True
# with unit test we want a bit simpler setup
if unit_test:
logging.basicConfig()
_logging_started = True
return
    # Store any log messages in a buffer until the `start` function is run
logger = logging.getLogger()
_buff_handler = logging.handlers.BufferingHandler(1000 * 1000)
logger.addHandler(_buff_handler)
logger.setLevel(logging.NOTSET)
    # Add a handler that stores the last 50 debug lines to `debug_buffer` for use in crash reports
crash_handler = logging.StreamHandler(debug_buffer)
crash_handler.setLevel(logging.DEBUG)
crash_handler.setFormatter(FlexGetFormatter())
logger.addHandler(crash_handler)
def start(filename=None, level=logging.INFO, to_console=True, to_file=True):
"""After initialization, start file logging.
"""
global _logging_started
assert _logging_configured
if _logging_started:
return
# root logger
logger = logging.getLogger()
level = get_level_no(level)
logger.setLevel(level)
formatter = FlexGetFormatter()
if to_file:
file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1000 * 1024, backupCount=9)
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
# without --cron we log to console
if to_console:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
console_handler.setLevel(level)
logger.addHandler(console_handler)
# flush what we have stored from the plugin initialization
logger.removeHandler(_buff_handler)
if _buff_handler:
for record in _buff_handler.buffer:
if logger.isEnabledFor(record.levelno):
logger.handle(record)
_buff_handler.flush()
_logging_started = True
# Set our custom logger class as default
logging.setLoggerClass(FlexGetLogger)
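# A minimal start-up sketch (the log file name is hypothetical): ``initialize``
# must run before ``start``; anything logged in between is buffered and then
# replayed to the real handlers once ``start`` attaches them.
def _logging_bootstrap_example():
    initialize()
    logging.getLogger('example').info('buffered until start() runs')
    start(filename='flexget.log', level='VERBOSE')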
|
|
"""Support for Homekit covers."""
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
SUPPORT_SET_TILT_POSITION,
CoverDevice,
)
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from . import KNOWN_DEVICES, HomeKitEntity
STATE_STOPPED = "stopped"
_LOGGER = logging.getLogger(__name__)
CURRENT_GARAGE_STATE_MAP = {
0: STATE_OPEN,
1: STATE_CLOSED,
2: STATE_OPENING,
3: STATE_CLOSING,
4: STATE_STOPPED,
}
TARGET_GARAGE_STATE_MAP = {STATE_OPEN: 0, STATE_CLOSED: 1, STATE_STOPPED: 2}
CURRENT_WINDOW_STATE_MAP = {0: STATE_OPENING, 1: STATE_CLOSING, 2: STATE_STOPPED}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Legacy set up platform."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit covers."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
def async_add_service(aid, service):
info = {"aid": aid, "iid": service["iid"]}
if service["stype"] == "garage-door-opener":
async_add_entities([HomeKitGarageDoorCover(conn, info)], True)
return True
if service["stype"] in ("window-covering", "window"):
async_add_entities([HomeKitWindowCover(conn, info)], True)
return True
return False
conn.add_listener(async_add_service)
class HomeKitGarageDoorCover(HomeKitEntity, CoverDevice):
"""Representation of a HomeKit Garage Door."""
def __init__(self, accessory, discovery_info):
"""Initialise the Cover."""
super().__init__(accessory, discovery_info)
self._state = None
self._obstruction_detected = None
self.lock_state = None
@property
def device_class(self):
"""Define this cover as a garage door."""
return "garage"
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
# pylint: disable=import-error
from homekit.model.characteristics import CharacteristicsTypes
return [
CharacteristicsTypes.DOOR_STATE_CURRENT,
CharacteristicsTypes.DOOR_STATE_TARGET,
CharacteristicsTypes.OBSTRUCTION_DETECTED,
]
def _update_door_state_current(self, value):
self._state = CURRENT_GARAGE_STATE_MAP[value]
def _update_obstruction_detected(self, value):
self._obstruction_detected = value
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._state == STATE_CLOSED
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
async def async_open_cover(self, **kwargs):
"""Send open command."""
await self.set_door_state(STATE_OPEN)
async def async_close_cover(self, **kwargs):
"""Send close command."""
await self.set_door_state(STATE_CLOSED)
async def set_door_state(self, state):
"""Send state command."""
characteristics = [
{
"aid": self._aid,
"iid": self._chars["door-state.target"],
"value": TARGET_GARAGE_STATE_MAP[state],
}
]
await self._accessory.put_characteristics(characteristics)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
if self._obstruction_detected is None:
return None
return {"obstruction-detected": self._obstruction_detected}
class HomeKitWindowCover(HomeKitEntity, CoverDevice):
"""Representation of a HomeKit Window or Window Covering."""
def __init__(self, accessory, discovery_info):
"""Initialise the Cover."""
super().__init__(accessory, discovery_info)
self._state = None
self._position = None
self._tilt_position = None
self._obstruction_detected = None
self.lock_state = None
self._features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
# pylint: disable=import-error
from homekit.model.characteristics import CharacteristicsTypes
return [
CharacteristicsTypes.POSITION_STATE,
CharacteristicsTypes.POSITION_CURRENT,
CharacteristicsTypes.POSITION_TARGET,
CharacteristicsTypes.POSITION_HOLD,
CharacteristicsTypes.VERTICAL_TILT_CURRENT,
CharacteristicsTypes.VERTICAL_TILT_TARGET,
CharacteristicsTypes.HORIZONTAL_TILT_CURRENT,
CharacteristicsTypes.HORIZONTAL_TILT_TARGET,
CharacteristicsTypes.OBSTRUCTION_DETECTED,
]
def _setup_position_hold(self, char):
self._features |= SUPPORT_STOP
def _setup_vertical_tilt_current(self, char):
self._features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION
)
def _setup_horizontal_tilt_current(self, char):
self._features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION
)
def _update_position_state(self, value):
self._state = CURRENT_WINDOW_STATE_MAP[value]
def _update_position_current(self, value):
self._position = value
def _update_vertical_tilt_current(self, value):
self._tilt_position = value
def _update_horizontal_tilt_current(self, value):
self._tilt_position = value
def _update_obstruction_detected(self, value):
self._obstruction_detected = value
@property
def supported_features(self):
"""Flag supported features."""
return self._features
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._position
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._position == 0
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
async def async_stop_cover(self, **kwargs):
"""Send hold command."""
characteristics = [
{"aid": self._aid, "iid": self._chars["position.hold"], "value": 1}
]
await self._accessory.put_characteristics(characteristics)
async def async_open_cover(self, **kwargs):
"""Send open command."""
await self.async_set_cover_position(position=100)
async def async_close_cover(self, **kwargs):
"""Send close command."""
await self.async_set_cover_position(position=0)
async def async_set_cover_position(self, **kwargs):
"""Send position command."""
position = kwargs[ATTR_POSITION]
characteristics = [
{"aid": self._aid, "iid": self._chars["position.target"], "value": position}
]
await self._accessory.put_characteristics(characteristics)
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt."""
return self._tilt_position
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
tilt_position = kwargs[ATTR_TILT_POSITION]
if "vertical-tilt.target" in self._chars:
characteristics = [
{
"aid": self._aid,
"iid": self._chars["vertical-tilt.target"],
"value": tilt_position,
}
]
await self._accessory.put_characteristics(characteristics)
elif "horizontal-tilt.target" in self._chars:
characteristics = [
{
"aid": self._aid,
"iid": self._chars["horizontal-tilt.target"],
"value": tilt_position,
}
]
await self._accessory.put_characteristics(characteristics)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
state_attributes = {}
if self._obstruction_detected is not None:
state_attributes["obstruction-detected"] = self._obstruction_detected
return state_attributes
|
|
import socket
import re
import pickle
import urllib2
import time
import os
import csv
import math
#Global variables
irc = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
global milkcount
global dataTime
global notesDic
global officerEmails
global officerNames
global officerOrder
#Sets up the IRC for the rest of the program
def setUpIRC():
global milkcount
global dataTime
network = 'irc.freenode.net'
port = 6667
irc.connect ( ( network, port ) )
print irc.recv ( 4096 )
irc.send ( 'NICK botty\r\n' )
irc.send ( 'USER botty botty botty :Python IRC\r\n' )
irc.send ( 'JOIN #joshtest\r\n' )
irc.send ( 'PRIVMSG #joshtest :Hello World.\r\n' )
irc.send("PRIVMSG #joshtest :For a list of my commands, type '!botty help'\r\n")
try:
with open("milk.p", "rb") as m:
milkcount = pickle.load(open("milk.p", "rb"))
except IOError:
milkcount = 0
'''
try:
with open("dataTime.p", "rb") as d:
dataTime = pickle.load(open("dataTime.p", "rb"))
except IOError:
dataTime = {}
'''
#Method that runs for the majority of the program's run time, reads the IRC messages
def runIRC():
global milkcount
global notesDic
timer = time.time()
while True:
if time.time() - timer > 600:
timer = time.time()
irc.send("PRIVMSG #joshtest :For a list of my commands, type '!botty help'\r\n")
data = irc.recv ( 4096 )
if re.search('PING', data, re.IGNORECASE):
irc.send ( 'PONG ' + data.split() [ 1 ] + '\r\n' )
if re.search('!botty quit', data, re.IGNORECASE):
            irc.send ( 'PRIVMSG #joshtest :Fine, if you don\'t want me\r\n' )
irc.send ( 'QUIT\r\n' )
break
if re.search('hi botty', data, re.IGNORECASE):
irc.send ( 'PRIVMSG #joshtest :I already said hi...\r\n' )
if re.search("!help botty -fun", data, re.IGNORECASE) or re.search("!botty help -fun", data, re.IGNORECASE):
helpInstructionsFun()
elif re.search("!help botty -all", data, re.IGNORECASE) or re.search("!botty help -all", data, re.IGNORECASE):
helpInstructionsAll()
elif re.search("!help botty", data, re.IGNORECASE) or re.search("!botty help", data, re.IGNORECASE):
helpInstructions()
if re.search("botty predict", data, re.IGNORECASE):
predictPeople(data)
if re.search("botty people", data, re.IGNORECASE):
getPeople()
if re.search('hello botty', data, re.IGNORECASE):
irc.send ( 'PRIVMSG #joshtest :I already said hi...\r\n' )
if re.search('botty officers', data, re.IGNORECASE):
foundKey = False
for element in officerOrder:
if re.search("botty officers " + element, data, re.IGNORECASE):
returnOfficer(element)
foundKey = True
if foundKey == False:
getOfficers(data)
if re.search('KICK', data, re.IGNORECASE):
irc.send ( 'JOIN #joshtest\r\n' )
if re.search("botty notes", data, re.IGNORECASE):
foundNote = False
for element in notesDic:
if re.search("botty notes " + element, data, re.IGNORECASE):
getNote(element)
foundNote = True
if re.search("botty notes -titles", data, re.IGNORECASE) or re.search("botty notes -title", data, re.IGNORECASE):
noteOptions(data)
foundNote = True
if foundNote == False and re.search("botty notes\r\n", data, re.IGNORECASE):
giveNotes(data)
elif foundNote == False:
irc.send("PRIVMSG #joshtest :Command not recognized. Try 'botty notes' for a the full list of notes, 'botty notes -title' for a list of note titles, or 'botty notes [TITLE HERE]' for an individual note entry\r\n")
if re.search("cheese", data, re.IGNORECASE):
irc.send ( 'PRIVMSG #joshtest :WHERE!!!!!!\r\n' )
if re.search("milk", data, re.IGNORECASE):
milkcount += 1
if milkcount == 1:
irc.send ( "PrIVMSG #joshtest :I've now drunk " + str(milkcount) + " gallon of milk\r\n")
else:
irc.send ( "PrIVMSG #joshtest :I've now drunk " + str(milkcount) + " gallons of milk\r\n")
if re.search('slaps botty', data, re.IGNORECASE):
irc.send ( 'PRIVMSG #joshtest :This is the Trout Protection Agency. Please put the Trout Down and walk away with your hands in the air.\r\n')
if re.search('botty acm', data, re.IGNORECASE):
irc.send("PRIVMSG #joshtest :http://polaris.acm.jhu.edu/\r\n")
#irc.send( 'PrIVMSG #joshtest :http://polaris.acm.jhu.edu/motion/thread2/lastimage.jpg?time=1474063328843\r\n PrIVMSG #joshtest :http://polaris.acm.jhu.edu/motion/thread1/lastimage.jpg?time=1474064133272\r\n')
print data
# Returns all of the help instructions
def helpInstructionsAll():
irc.send('PRIVMSG #joshtest :This bot was designed to help monitor the number of people in G-67.\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type '!help botty' or '!botty help' to get instructions on how to use the useful commands only.\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Add a '-fun' tag to get the instructions for fun commands only.\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Add a '-all' tag to get all of the instructions for every command.\r\n")
time.sleep(1)
irc.send('PRIVMSG #joshtest :Here are the commands you can use--\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'hi botty' or 'hello botty' to say hi to me\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'milk' to make me drink some milk\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'slaps botty' to get in some trouble\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'cheese' to make me hunt for cheese\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty acm' to see the acm live\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty people' to get a count on how many people are in G-67\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes' to get the SysAdmin notes messaged directly to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes [TITLE HERE]' to return a specific SysAdmin note\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes -title' to get the list of SysAdmin note titles messaged to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty officers' to get the list of officers and their email addresses sent to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty officers [POSITION HERE]' to get that officer's name and email address\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty predict hh:mm' to get a prediction of how many people will be in G-67 at a given data and time. Note, we are still gather data for this method.\r\n")
# Returns the fun help instructions
def helpInstructionsFun():
irc.send('PRIVMSG #joshtest :This bot was designed to help monitor the number of people in G-67, but it also has fun commands.\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type '!help botty' or '!botty help' to get instructions on how to use the useful commands only.\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Add a '-all' tag to get all of the instructions for every command.\r\n")
time.sleep(1)
irc.send('PRIVMSG #joshtest :Here are the fun commands you can use--\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'hi botty' or 'hello botty' to say hi to me\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'milk' to make me drink some milk\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'slaps botty' to get in some trouble\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'cheese' to make me hunt for cheese\r\n")
# Returns the help instructions
def helpInstructions():
irc.send('PRIVMSG #joshtest :This bot was designed to help monitor the number of people in G-67.\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type '!help botty' or '!botty help' to get instructions on how to use the useful commands only.\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Add a '-fun' tag to get the instructions for fun commands only.\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Add a '-all' tag to get all of the instructions for every command.\r\n")
time.sleep(1)
irc.send('PRIVMSG #joshtest :Here are the commands you can use--\r\n')
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty acm' to see the acm live\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty people' to get a count on how many people are in G-67\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes' to get the SysAdmin notes messaged directly to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes [TITLE HERE]' to return a specific SysAdmin note\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty notes -title' to get the list of SysAdmin note titles messaged to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty officers' to get the list of officers and their email addresses sent to you\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty officers [POSITION HERE]' to get that officer's name and email address\r\n")
time.sleep(1)
irc.send("PRIVMSG #joshtest :Type 'botty predict hh:mm' to get a prediction of how many people will be in G-67 at a given data and time. Note, we are still gathering data for this method.\r\n")
# Sets up the dataTime dictionary for predicting
def setupPeople():
global dataTime
people = open("people.txt", 'r')
for line in people:
lineLoc = line.find('|')
numPeople = line[lineLoc + 1:].strip('\n')
dateTime = line[:lineLoc]
commaLoc = dateTime.find(',')
date = dateTime[:commaLoc]
time = dateTime[commaLoc + 1:]
timeShort = time[:-3]
minutesCol = timeShort.find(':')
minutes = int(timeShort[minutesCol+1:])
minutes = int(round(minutes, -1))
if minutes == 0:
minutes = "00"
else:
minutes = str(minutes)
timeShort = timeShort[:minutesCol] + ":" + minutes
if timeShort not in dataTime:
dataTime[timeShort] = [int(numPeople)]
else:
dataTime[timeShort].append(int(numPeople))
people.close()
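# A minimal sketch of the time bucketing used in setupPeople and predictPeople
# (helper name is hypothetical): minutes are rounded to the nearest multiple
# of ten so that observations such as "14:07" and "14:12" share the "14:10"
# bucket.
def roundTimeToBucket(timeShort):
    minutesCol = timeShort.find(':')
    minutes = int(round(int(timeShort[minutesCol + 1:]), -1))
    if minutes == 0:
        minutes = "00"
    else:
        minutes = str(minutes)
    return timeShort[:minutesCol] + ":" + minutes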
# Predicts the number of people that will be in G-67 at a given time
def predictPeople(message):
global dataTime
dataTime = {}
setupPeople()
timeLoc = message.find("botty predict ")
timeLen = len("botty predict ")
time = message[timeLoc+timeLen:].strip('\r')
time = time.strip('\n')
if ':' not in time:
irc.send("PRIVMSG #joshtest :Please submit your request in the following format: 'botty predict hh:mm'\r\n")
return
minutesCol = time.find(':')
minutes = int(time[minutesCol+1:])
minutes = int(round(minutes, -1))
if minutes == 0:
minutes = "00"
else:
minutes = str(minutes)
time = time[:minutesCol] + ":" + minutes
if time not in dataTime:
irc.send("PRIVMSG #joshtest :Please submit your request in the following format: 'botty predict hh:mm'\r\n")
return
else:
length = len(dataTime[time])
sumPeople = 0
for element in dataTime[time]:
sumPeople += element
numPeople = float(sumPeople) / float(length)
numPeople = round(numPeople, 1)
irc.send("PRIVMSG #joshtest :We predict that " + str(numPeople) + " will be in G-67 at " + time +"\r\n")
irc.send("PRIVMSG #joshtest :More will be written soon to allow day predictions as well\r\n")
# Gets the number of people that were in G-67 most recently
def getPeople():
people = open("people.txt", 'r')
for line in people:
pass
lineLoc = line.find('|')
numPeople = line[lineLoc + 1:].strip('\n')
dateTime = line[:lineLoc]
commaLoc = dateTime.find(',')
date = dateTime[:commaLoc]
time = dateTime[commaLoc + 1:]
irc.send("PRIVMSG #joshtest :The number of people in G-67 at " + time + " on " + date + " was " + numPeople + " people.\r\n")
people.close()
# Sends all of the admin notes to a user in a private message
def giveNotes(message):
global notesDic
endNameloc = message.find("!")
name = message[1:endNameloc]
urlshort = "https://www.acm.jhu.edu/~admins.pub/systems/"
irc.send("PRIVMSG #joshtest :Full list of notes are being private messaged to " + name + "\r\n")
irc.send("PRIVMSG #joshtest :Please wait " + str(len(notesDic)) + " seconds before giving me another command\r\n")
irc.send("PRIVMSG " + name + " :hello " + name + "! Here are the list of notes I can give you\r\n")
neededLines = open("neededLines.txt", 'r')
for line in neededLines:
startPos = line.find('ml">')
endPos = line.find('</a')
note = line[startPos+4:endPos]
note = note.replace('’', "'")
hrefLoc = line.find('href')
noteurl = urlshort+ line[hrefLoc+6:startPos+2]
if 'toctree-l1' in line:
irc.send("PRIVMSG " + name + " : " + note + ": " + noteurl + "\r\n")
elif 'toctree-l2' in line:
irc.send("PRIVMSG " + name + " : --" + note + ": " + noteurl + "\r\n")
elif 'toctree-l3' in line:
irc.send("PRIVMSG " + name + " : ----" + note + ": " + noteurl + "\r\n")
elif 'toctree-l4' in line:
irc.send("PRIVMSG " + name + " : ------" + note + ": " + noteurl + "\r\n")
elif 'toctree-l5' in line:
irc.send("PRIVMSG " + name + " : --------" + note + ": " + noteurl + "\r\n")
time.sleep(1)
neededLines.close()
# Prints a given admin note to the IRC channel
def getNote(note):
global notesDic
irc.send("PRIVMSG #joshtest :Here are the notes for " + note + " --> " + notesDic[note] + "\r\n")
# Sends the titles of all of the admin notes to the user in a private message
def noteOptions(message):
global notesDic
endNameloc = message.find("!")
name = message[1:endNameloc]
numNotes = len(notesDic)
arrKeys = []
i = 0
numKey = 0
for key in notesDic:
if numKey == 0:
arrKeys.append([key])
numKey += 1
else:
arrKeys[i].append(key)
numKey += 1
if numKey == 8:
numKey = 0
i += 1
irc.send("PRIVMSG #joshtest :The list of available notes is being private messaged to " + name + "\r\n")
irc.send("PRIVMSG " + name + " :These are all of the available notes:\r\n")
start = 1
end = start + 7
for element in arrKeys:
strTitles = str(element).strip('[')
strTitles = strTitles.strip(']')
irc.send("PRIVMSG " + name + " :Titles " + str(start) + " through " + str(end) + ": "+ strTitles + "\r\n")
start = end + 1
end = end + 8
if end > numNotes:
end = numNotes
time.sleep(1)
irc.send("PRIVMSG " + name + " :To get the notes for one of these, please type 'botty notes [TITLE HERE]'\r\n")
# Sets up the dictionaries and array for officers
def setupOfficers():
global officerNames
global officerEmails
global officerOrder
officerNames = {}
officerEmails = {}
officerOrder = []
c = open("officerInfo.csv")
csv_c = csv.reader(c)
for line in csv_c:
officerNames[line[0]] = line[1]
officerEmails[line[0]] = line[2]
officerOrder.append(line[0])
# Sends the list of officers and email addresses to the user in a private message
def getOfficers(message):
global officerNames
global officerEmails
global officerOrder
endNameloc = message.find("!")
name = message[1:endNameloc]
irc.send("PRIVMSG #joshtest :List of officers being sent privately to " + name + "\r\n")
irc.send("PRIVMSG " + name + " :The officers are as follows:\r\n")
i = 0
for element in officerOrder:
time.sleep(1)
irc.send("PRIVMSG " + name + " :" + officerOrder[i] + ": " + officerNames[officerOrder[i]] + " (" + officerEmails[officerOrder[i]] + ")\r\n")
i += 1
irc.send("PRIVMSG " + name + " :To email all of officers, please email '[email protected]'\r\n")
# Prints the given officer's name and email to the channel
def returnOfficer(title):
global officerNames
global officerEmails
irc.send("PRIVMSG #joshtest :Here is the information on the " + title + ": " + officerNames[title] + " (" + officerEmails[title] + ")\r\n")
irc.send("PRIVMSG #joshtest :To email all officers, please email '[email protected]'\r\n")
# Creates the notes dictionary
def buildNotes():
global notesDic
notesDic = {}
url = "https://www.acm.jhu.edu/~admins.pub/systems/index.html"
urlshort = "https://www.acm.jhu.edu/~admins.pub/systems/"
f = urllib2.urlopen(url)
htmlRaw = open("htmlRaw.txt", 'w')
htmlRaw.write(f.read())
htmlRaw.close()
htmlRaw = open("htmlRaw.txt", 'r')
neededLines = open("neededLines.txt", 'w')
for line in htmlRaw:
if 'li class="toctree' in line:
neededLines.write(line)
htmlRaw.close()
neededLines.close()
neededLines = open("neededLines.txt", 'r')
for line in neededLines:
startPos = line.find('ml">')
endPos = line.find('</a')
note = line[startPos+4:endPos]
note = note.replace('’', "'")
hrefLoc = line.find('href')
noteurl = urlshort+ line[hrefLoc+6:startPos+2]
notesDic[note] = noteurl
neededLines.close()
os.remove("htmlRaw.txt")
# Runs the program
if __name__ == '__main__':
buildNotes()
setupOfficers()
setUpIRC()
runIRC()
print("Safely exited")
pickle.dump(milkcount, open("milk.p", "wb"))
#pickle.dump(dataTime, open("dataTime.p", "wb"))
os.remove("neededLines.txt")
|
|
"""
Template dispatcher module.
A dispatcher is a function that can contain multiple behaviors.
Its specific behavior can be controlled by DispatchContext.
DispatchContext is used in two ways, usually via different implementations
of the DispatchContext base class.
- During search, we can use it to pass the current proposal from the tuner.
- During evaluation, we can use it to pick the best policy.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
import logging
import numpy as np
from decorator import decorate
from tvm import target as _target
from .space import FallbackConfigEntity
logger = logging.getLogger('autotvm')
class DispatchContext(object):
"""
Base class of dispatch context.
DispatchContext enables the target and workload
specific dispatch mechanism for templates.
"""
current = None
def __init__(self):
self._old_ctx = DispatchContext.current
def query(self, target, workload):
"""
Query the context to get the specific config for a template.
        If the result cannot be found inside this context, this function will query it
        from the upper contexts.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Returns
-------
cfg : ConfigSpace
The specific configuration.
"""
ret = self._query_inside(target, workload)
if ret is None:
ret = self._old_ctx.query(target, workload)
return ret
def _query_inside(self, target, workload):
"""
Query the context to get the specific config for a template.
        This function only queries the config inside this context.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Returns
-------
cfg : ConfigSpace
The specific configuration.
"""
raise NotImplementedError()
def __enter__(self):
self._old_ctx = DispatchContext.current
DispatchContext.current = self
return self
def __exit__(self, ptype, value, trace):
DispatchContext.current = self._old_ctx
def dispatcher(fworkload):
"""Wrap a workload dispatcher function.
Parameters
----------
fworkload : function
The workload extraction function from arguments.
Returns
-------
fdispatcher : function
A wrapped dispatcher function, which will
dispatch based on DispatchContext and
the current workload.
"""
dispatch_dict = {}
func_name = fworkload.__name__
def register(key, func=None, override=False):
"""Register template function.
Parameters
----------
key : str or List of str
The template key to identify the template
under this dispatcher.
func : function
The function to be registered.
The first argument of the function is always
cfg returned by DispatchContext,
the rest arguments are the same as the fworkload.
override : bool
Whether override existing registration.
Returns
-------
        The register function if ``func`` is not given, so it can also be used as a decorator.
"""
if isinstance(key, str):
key = [key]
def _do_reg(myf):
for x in key:
if x in dispatch_dict and not override:
raise ValueError(
"Key %s is already registered for %s" % (x, func_name))
dispatch_dict[x] = myf
return myf
if func:
return _do_reg(func)
return _do_reg
def dispatch_func(func, *args, **kwargs):
"""The wrapped dispatch function"""
tgt = _target.current_target()
workload = func(*args, **kwargs)
cfg = DispatchContext.current.query(tgt, workload)
if cfg.is_fallback and not cfg.template_key:
# first try 'direct' template
if 'direct' in dispatch_dict:
return dispatch_dict['direct'](cfg, *args, **kwargs)
            # otherwise pick an arbitrary registered template (the first one found)
for v in dispatch_dict.values():
return v(cfg, *args, **kwargs)
else:
return dispatch_dict[cfg.template_key](cfg, *args, **kwargs)
fdecorate = decorate(fworkload, dispatch_func)
fdecorate.register = register
return fdecorate
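# A minimal usage sketch (all names below are hypothetical): ``dispatcher``
# wraps a workload-extraction function, and ``register`` attaches
# per-template implementations that receive the queried config as their
# first argument.
def _dispatcher_usage_example():
    @dispatcher
    def example_op(shape):
        # The returned tuple is the workload key used for config lookup.
        return ('example_op', shape)

    @example_op.register('direct')
    def _example_op_direct(cfg, shape):
        # Build and return the template result using ``cfg`` here.
        return ('direct', shape)

    # Calling example_op(...) inside a target context would query the
    # current DispatchContext and dispatch to the registered template.
    return example_op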
class ApplyConfig(DispatchContext):
"""Apply a deterministic config entity for all queries.
Parameters
----------
config : ConfigSpace or ConfigEntity
The specific configuration we care about.
"""
def __init__(self, config):
super(ApplyConfig, self).__init__()
self._config = config
self.workload = None
def _query_inside(self, target, workload):
"""Override query"""
self.workload = workload
return self._config
class ApplyHistoryBest(DispatchContext):
"""
Apply the history best config
Parameters
----------
records : str or iterator of (MeasureInput, MeasureResult)
Collection of tuning records.
        If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
"""
def __init__(self, records):
super(ApplyHistoryBest, self).__init__()
self.best_by_targetkey = {}
self.best_by_model = {}
if records:
self.load(records)
def load(self, records):
"""Load records to this dispatch context
Parameters
----------
records : str or iterator of (MeasureInput, MeasureResult)
Collection of tuning records.
            If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
"""
from ..record import load_from_file
if isinstance(records, str):
records = load_from_file(records)
if not records:
return
best_by_targetkey = self.best_by_targetkey
best_by_model = self.best_by_model
counter = 0
for inp, res in records:
counter += 1
if res.error_no != 0:
continue
# use target keys in tvm target system as key to build best map
for k in inp.target.keys:
key = (k, inp.task.workload)
if key not in best_by_targetkey:
best_by_targetkey[key] = (inp, res)
else:
_, other_res = best_by_targetkey[key]
if np.mean(other_res.costs) > np.mean(res.costs):
best_by_targetkey[key] = (inp, res)
# use model as key to build best map
for opt in inp.target.options:
if opt.startswith("-model"):
model = opt[7:]
key = (model, inp.task.workload)
if key not in best_by_model:
best_by_model[key] = (inp, res)
else:
_, other_res = best_by_model[key]
if np.mean(other_res.costs) > np.mean(res.costs):
best_by_model[key] = (inp, res)
break
logger.debug("Finish loading %d records", counter)
def _query_inside(self, target, workload):
if target is None:
raise RuntimeError("Need a target context to find the history best. "
"Hint: If your target is llvm, use `with tvm.target.create('llvm'):`"
" above the dispatcher call. So does other target. ")
# first try matching by model
for opt in target.options:
if opt.startswith("-model"):
model = opt[7:]
key = (model, workload)
if key in self.best_by_model:
return self.best_by_model[key][0].config
# then try matching by target key
for k in target.keys:
key = (k, workload)
if key in self.best_by_targetkey:
return self.best_by_targetkey[key][0].config
return None
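# A minimal usage sketch (the log file name is hypothetical): wrap compilation
# in an ApplyHistoryBest context so template dispatch picks the best measured
# configs for the active target.
def _apply_history_best_example():
    with ApplyHistoryBest('tuning_records.log'):
        with _target.create('llvm'):
            pass  # build/compile here; dispatchers query the loaded records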
class FallbackContext(DispatchContext):
"""
A fallback dispatch context.
Any tunable template can be called under this context.
This is the root context.
"""
def __init__(self):
super(FallbackContext, self).__init__()
self.memory = {}
self.silent = False
# a set to prevent print duplicated message
self.messages = set()
def _query_inside(self, target, workload):
key = (str(target), workload)
if key in self.memory:
return self.memory[key]
if not self.silent:
msg = "Cannot find config for target=%s, workload=%s. A fallback configuration "\
"is used, which may bring great performance regression." % (target, workload)
if msg not in self.messages:
self.messages.add(msg)
logger.warning(msg)
cfg = FallbackConfigEntity()
# cache this config
self.memory[key] = cfg
return cfg
def clear_cache(self, target, workload):
"""Clear fallback cache. Pass the same argument as _query_inside to this function
to clean the cache.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
"""
key = (str(target), workload)
if key in self.memory:
del self.memory[key]
DispatchContext.current = FallbackContext()
def clear_fallback_cache(target, workload):
"""Clear fallback cache. Pass the same argument as _query_inside to this function
to clean the cache.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Note
----
    This is used in alter_op_layout to clear the stale cache created before calling the topi compute function
"""
context = DispatchContext.current
while not isinstance(context, FallbackContext):
context = context._old_ctx
context.clear_cache(target, workload)
|
|
#! /usr/bin/env python
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# See profile.doc for more information
"""Class for profiling Python code."""
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
__all__ = ["run","help","Profile"]
# Sample timer for use with
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats()
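# A minimal usage sketch (the output file name is hypothetical): profile a
# short statement and print the default report; passing a file name instead
# would marshal the raw stats for later inspection with pstats.
def _run_example():
    run("x = [i * i for i in range(1000)]")
    # run("x = [i * i for i in range(1000)]", "profile.out")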
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'profile.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "profile.doc"',
print 'along the Python search path'
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions
[ 2] = Cumulative time spent in this frame's function, including time in
all subfunctions to this frame.
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling)
[-1] = Our parent 6-tuple (corresponds to frame.f_back)
Timing data for each function is stored as a 5-tuple in the dictionary
    self.timings[].  The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
    [4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
def __init__(self, timer=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.dispatch = { \
'call' : self.trace_dispatch_call, \
'return' : self.trace_dispatch_return, \
'exception': self.trace_dispatch_exception, \
}
if not timer:
if os.name == 'mac':
import MacOS
self.timer = MacOS.GetTicks
self.dispatcher = self.trace_dispatch_mac
self.get_time = self.get_time_mac
elif hasattr(time, 'clock'):
self.timer = time.clock
self.dispatcher = self.trace_dispatch_i
elif hasattr(os, 'times'):
self.timer = os.times
self.dispatcher = self.trace_dispatch
else:
self.timer = time.time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
if len(t) == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
except TypeError:
self.dispatcher = self.trace_dispatch_i
self.t = self.get_time()
self.simulate_call('profiler')
def get_time(self): # slow simulation of method to acquire time
t = self.timer()
if type(t) == type(()) or type(t) == type([]):
t = reduce(lambda x,y: x+y, t, 0)
return t
def get_time_mac(self):
return self.timer()/60.0
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
t = self.timer()
t = t[0] + t[1] - self.t # No Calibration constant
# t = t[0] + t[1] - self.t - .00053 # Calibration constant
if self.dispatch[event](frame,t):
t = self.timer()
self.t = t[0] + t[1]
else:
r = self.timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
return
# Dispatch routine for best timer program (return = scalar integer)
def trace_dispatch_i(self, frame, event, arg):
t = self.timer() - self.t # - 1 # Integer calibration constant
if self.dispatch[event](frame,t):
self.t = self.timer()
else:
self.t = self.timer() - t # put back unrecorded delta
return
# Dispatch routine for macintosh (timer returns time in ticks of 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
t = self.timer()/60.0 - self.t # - 1 # Integer calibration constant
if self.dispatch[event](frame,t):
self.t = self.timer()/60.0
else:
self.t = self.timer()/60.0 - t # put back unrecorded delta
return
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
t = self.get_time() - self.t
if self.dispatch[event](frame,t):
self.t = self.get_time()
else:
self.t = self.get_time()-t # put back unrecorded delta
return
def trace_dispatch_exception(self, frame, t):
rt, rtt, rct, rfn, rframe, rcur = self.cur
if (not rframe is frame) and rcur:
return self.trace_dispatch_return(rframe, t)
return 0
def trace_dispatch_call(self, frame, t):
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
if self.timings.has_key(fn):
cc, ns, tt, ct, callers = self.timings[fn]
self.timings[fn] = cc, ns + 1, tt, ct, callers
else:
self.timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
# if not frame is self.cur[-2]: raise "Bad return", self.cur[3]
# Prefix "r" means part of the Returning or exiting frame
# Prefix "p" means part of the Previous or older frame
rt, rtt, rct, rfn, frame, rcur = self.cur
rtt = rtt + t
sft = rtt + rct
pt, ptt, pct, pfn, pframe, pcur = rcur
self.cur = pt, ptt+rt, pct+sft, pfn, pframe, pcur
cc, ns, tt, ct, callers = self.timings[rfn]
if not ns:
ct = ct + sft
cc = cc + 1
if callers.has_key(pfn):
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
self.timings[rfn] = cc, ns - 1, tt+rtt, ct, callers
return 1
    # The next few functions play with self.cmd.  By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
a = self.dispatch['call'](frame, 0)
return
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
t = self.get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
a = self.dispatch['return'](self.cur[-2], t)
t = 0
self.t = self.get_time() - t
def print_stats(self):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(-1). \
print_stats()
def dump_stats(self, file):
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func in self.timings.keys():
cc, ns, tt, ct, callers = self.timings[func]
callers = callers.copy()
nc = 0
for func_caller in callers.keys():
nc = nc + callers[func_caller]
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec cmd in globals, locals
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args):
self.set_cmd(`func`)
sys.setprofile(self.dispatcher)
try:
return apply(func, args)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis. The result can then be placed in the
# Profile.dispatch_event() routine for the given platform. Note
# that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
# effort. Also note that if too large a value is specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each function. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
# event/time ratio (i.e., the profiler would run slower, for a very
# low "value added" feature.)
#
# Plugging in the calibration constant doesn't slow down the
# profiler very much, and the accuracy goes way up.
#**************************************************************
def calibrate(self, m):
# Modified by Tim Peters
n = m
s = self.get_time()
while n:
self.simple()
n = n - 1
f = self.get_time()
my_simple = f - s
#print "Simple =", my_simple,
n = m
s = self.get_time()
while n:
self.instrumented()
n = n - 1
f = self.get_time()
my_inst = f - s
# print "Instrumented =", my_inst
avg_cost = (my_inst - my_simple)/m
#print "Delta/call =", avg_cost, "(profiler fixup constant)"
return avg_cost
# simulate a program with no profiler activity
def simple(self):
a = 1
pass
# simulate a program with call/return event processing
def instrumented(self):
a = 1
self.profiler_simulation(a, a, a)
# simulate an event processing activity (from user's perspective)
def profiler_simulation(self, x, y, z):
t = self.timer()
## t = t[0] + t[1]
self.ut = t
class OldProfile(Profile):
"""A derived profiler that simulates the old style profile, providing
errant results on recursive functions. The reason for the usefulness of
this profiler is that it runs faster (i.e., less overhead). It still
creates all the caller stats, and is quite useful when there is *no*
recursion in the user's code.
This code also shows how easy it is to create a modified profiler.
"""
def trace_dispatch_exception(self, frame, t):
rt, rtt, rct, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame:
return self.trace_dispatch_return(rframe, t)
return 0
def trace_dispatch_call(self, frame, t):
fn = `frame.f_code`
self.cur = (t, 0, 0, fn, frame, self.cur)
if self.timings.has_key(fn):
tt, ct, callers = self.timings[fn]
self.timings[fn] = tt, ct, callers
else:
self.timings[fn] = 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
rt, rtt, rct, rfn, frame, rcur = self.cur
rtt = rtt + t
sft = rtt + rct
pt, ptt, pct, pfn, pframe, pcur = rcur
self.cur = pt, ptt+rt, pct+sft, pfn, pframe, pcur
tt, ct, callers = self.timings[rfn]
if callers.has_key(pfn):
callers[pfn] = callers[pfn] + 1
else:
callers[pfn] = 1
self.timings[rfn] = tt+rtt, ct + sft, callers
return 1
def snapshot_stats(self):
self.stats = {}
for func in self.timings.keys():
tt, ct, callers = self.timings[func]
callers = callers.copy()
nc = 0
for func_caller in callers.keys():
nc = nc + callers[func_caller]
self.stats[func] = nc, nc, tt, ct, callers
class HotProfile(Profile):
"""The fastest derived profile example. It does not calculate
caller-callee relationships, and does not calculate cumulative
time under a function. It only calculates time spent in a
function, so it runs very quickly due to its very low overhead.
"""
def trace_dispatch_exception(self, frame, t):
rt, rtt, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame:
return self.trace_dispatch_return(rframe, t)
return 0
def trace_dispatch_call(self, frame, t):
self.cur = (t, 0, frame, self.cur)
return 1
def trace_dispatch_return(self, frame, t):
rt, rtt, frame, rcur = self.cur
rfn = `frame.f_code`
pt, ptt, pframe, pcur = rcur
self.cur = pt, ptt+rt, pframe, pcur
if self.timings.has_key(rfn):
nc, tt = self.timings[rfn]
self.timings[rfn] = nc + 1, rt + rtt + tt
else:
self.timings[rfn] = 1, rt + rtt
return 1
def snapshot_stats(self):
self.stats = {}
for func in self.timings.keys():
nc, tt = self.timings[func]
self.stats[func] = nc, nc, tt, 0, {}
#****************************************************************************
def Stats(*args):
print 'Report generating functions are in the "pstats" module\a'
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
import sys
import os
if not sys.argv[1:]:
print "usage: profile.py scriptfile [arg] ..."
sys.exit(2)
filename = sys.argv[1] # Get script filename
del sys.argv[0] # Hide "profile.py" from argument list
# Insert script directory in front of module search path
sys.path.insert(0, os.path.dirname(filename))
run('execfile(' + `filename` + ')')
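# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): measuring the
# per-event profiler overhead with calibrate(), as described in the long
# comment block above.  The iteration count and the way the constant is
# applied are assumptions for the example; the constant would be subtracted
# by hand inside the dispatch routines (see the "Calibration constant"
# comments in trace_dispatch and trace_dispatch_i).
def _calibration_example(iterations=30000):
    p = Profile()
    delta = p.calibrate(iterations)   # average overhead per simulated call
    # e.g. in trace_dispatch_i one would then write:
    #     t = self.timer() - self.t - delta
    return delta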
|
|
from oslo.config import cfg
import addons
src = cfg.OptGroup(name='src',
title='Credentials and general config for source cloud')
src_opts = [
cfg.StrOpt('type', default='os',
help='os - OpenStack Cloud'),
cfg.StrOpt('auth_url', default='-',
help='Keystone service endpoint for authorization'),
cfg.StrOpt('host', default='-',
help='ip-address controller for cloud'),
cfg.StrOpt('ssh_host', default='',
help='ip-address of cloud node for ssh connect'),
cfg.StrOpt('ext_cidr', default='',
help='external network CIDR'),
cfg.StrOpt('user', default='-',
help='user for access to API'),
cfg.StrOpt('password', default='-',
help='password for access to API'),
cfg.StrOpt('tenant', default='-',
help='tenant for access to API'),
cfg.StrOpt('service_tenant', default='services',
help='Tenant name for services'),
cfg.StrOpt('ssh_user', default='root',
help='user to connect via ssh'),
cfg.StrOpt('ssh_sudo_password', default='',
help='sudo password to connect via ssh, if any')
]
dst = cfg.OptGroup(name='dst',
title='Credentials and general '
'config for destination cloud')
dst_opts = [
cfg.StrOpt('type', default='os',
help='os - OpenStack Cloud'),
cfg.StrOpt('auth_url', default='-',
help='Keystone service endpoint for authorization'),
cfg.StrOpt('host', default='-',
help='ip-address controller for cloud'),
cfg.StrOpt('ssh_host', default='',
help='ip-address of cloud node for ssh connect'),
cfg.StrOpt('ext_cidr', default='',
help='external network CIDR'),
cfg.StrOpt('user', default='-',
help='user for access to API'),
cfg.StrOpt('password', default='-',
help='password for access to API'),
cfg.StrOpt('tenant', default='-',
help='tenant for access to API'),
cfg.StrOpt('service_tenant', default='services',
help='Tenant name for services'),
cfg.StrOpt('ssh_user', default='root',
help='user to connect via ssh'),
cfg.StrOpt('ssh_sudo_password', default='',
help='sudo password to connect via ssh, if any')
]
migrate = cfg.OptGroup(name='migrate',
title='General config for migration process')
migrate_opts = [
cfg.BoolOpt('keep_user_passwords', default=True,
help='True - keep user passwords, '
'False - not keep user passwords'),
cfg.StrOpt('key_filename', default='id_rsa',
help='name pub key'),
cfg.BoolOpt('keep_ip', default=False,
help='yes - keep ip, no - not keep ip'),
cfg.BoolOpt('migrate_extnets', default=False,
help='yes - migrate external networks, no - do not migrate external networks'),
cfg.StrOpt('ext_net_map', default='configs/ext_net_map.yaml',
help='path to the map of external networks, which contains '
'references between old and new ids'),
cfg.BoolOpt('keep_floatingip', default=False,
help='yes - keep floatingip, no - not keep floatingip'),
cfg.StrOpt('cinder_migration_strategy',
default='cloudferrylib.os.storage.cinder_storage.CinderStorage',
help='path to class that will perform cinder migration actions'),
cfg.BoolOpt('keep_lbaas', default=False,
help='yes - keep lbaas settings, no - not keep lbaas settings'),
cfg.BoolOpt('keep_volume_snapshots', default=False,
help='yes - keep volume snapshots, no - not keep volume snapshots'),
cfg.BoolOpt('keep_volume_storage', default=False,
help='True - keep volume_storage, False - not keep volume_storage'),
cfg.StrOpt('speed_limit', default='10MB',
help='speed limit for glance to glance'),
cfg.StrOpt('instances', default='key_name-qwerty',
help='filter instance by parameters'),
cfg.StrOpt('file_compression', default='dd',
help='gzip - use gzip compression when transferring files via ssh, '
'dd - no compression, transfer directly via dd'),
cfg.IntOpt('level_compression', default=7,
help='compression level for gzip'),
cfg.StrOpt('ssh_transfer_port', default='9990',
help='interval ports for ssh tunnel'),
cfg.StrOpt('port', default='9990',
help='interval ports for ssh tunnel'),
cfg.BoolOpt('overwrite_user_passwords', default=False,
help='Overwrite passwords for existing users on destination'),
cfg.BoolOpt('migrate_quotas', default=False,
help='Migrate tenant quotas'),
cfg.StrOpt('disk_format', default='qcow2',
help='disk format when converting a volume to an image'),
cfg.StrOpt('container_format', default='bare',
help='container format when converting a volume to an image'),
cfg.BoolOpt('direct_compute_transfer', default=False,
help='Direct data transmission between compute nodes via external network'),
cfg.StrOpt('filter_path', default='configs/filter.yaml',
help='path to the filter yaml file with options for search resources'),
cfg.IntOpt('retry', default=7,
help='Number of retries if a Performing error occurs'),
cfg.IntOpt('time_wait', default=5,
help='Time to wait between retries if a Performing error occurs'),
cfg.IntOpt('ssh_chunk_size', default=100,
help='Size of one chunk to transfer via SSH'),
cfg.StrOpt('group_file_path', default="vm_groups.yaml",
help='Path to file with the groups of VMs'),
cfg.BoolOpt('all_networks', default=False,
help="Migrate all network resources from all tenants"),
cfg.BoolOpt('all_volumes', default=False,
help="Migrate all volume resources from all tenants"),
cfg.BoolOpt('all_vms', default=False,
help="Migrate all VMs from all tenants. The user specified in "
"the 'dst' section of the config should also have the admin "
"role in all tenants."),
cfg.BoolOpt('all_images', default=False,
help='Migrate images of all tenants'),
cfg.BoolOpt('skip_down_hosts', default=True,
help="If set to True, removes unreachable compute hosts from "
"nova hypervisor list. Otherwise migration process fails "
"with unrecoverable error if host is down."),
cfg.StrOpt('scenario', default='scenario/migrate.yaml',
help='Path to a scenario file, which holds the whole migration '
'procedure. Must be YAML format'),
cfg.StrOpt('tasks_mapping', default='scenario/tasks.yaml',
help='Path to a file which holds CloudFerry python code tasks '
'mapped to migration scenario items. Items defined in '
'this file must be used in the migration scenario.'),
cfg.BoolOpt('migrate_users', default=True,
help='Migrate users'),
cfg.BoolOpt('migrate_user_quotas', default=True,
help='Migrate user quotas. If it set in "false" only tenant '
'quotas will be migrated. Use this in case when '
'OpenStack does not support user quotas (e.g. Grizzly)'),
cfg.StrOpt('incloud_live_migration', default='nova',
help='Live migration type used for in-cloud live migration. '
'Possible values: "nova", "cobalt".')
]
mail = cfg.OptGroup(name='mail',
title='Mail credentials for notifications')
mail_opts = [
cfg.StrOpt('server', default='-',
help='name mail server'),
cfg.StrOpt('username', default='-',
help='name username for mail'),
cfg.StrOpt('password', default='-',
help='password for mail'),
cfg.StrOpt('from_addr', default='-',
help='field FROM in letter')
]
src_mysql = cfg.OptGroup(name='src_mysql',
title='Config mysql for source cloud')
src_mysql_opts = [
cfg.StrOpt('user', default='-',
help='user for mysql'),
cfg.StrOpt('password', default='-',
help='password for mysql'),
cfg.StrOpt('host', default='-',
help='host of mysql'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
]
src_rabbit = cfg.OptGroup(name='src_rabbit',
title='Config RabbitMQ for source cloud')
src_rabbit_opts = [
cfg.StrOpt('user', default='guest',
help='user for RabbitMQ'),
cfg.StrOpt('password', default='guest',
help='password for RabbitMQ'),
cfg.StrOpt('hosts', default='-',
help='comma separated RabbitMQ hosts')
]
src_compute = cfg.OptGroup(name='src_compute',
title='Config service for compute')
src_compute_opts = [
cfg.StrOpt('service', default='nova',
help='name service for compute'),
cfg.StrOpt('backend', default='ceph',
help='backend for ephemeral drives'),
cfg.StrOpt('convert_diff_file', default='qcow2',
help='convert diff file to'),
cfg.StrOpt('convert_ephemeral_disk', default='qcow2',
help='convert ephemeral disk to'),
cfg.BoolOpt('disk_overcommit', default=False,
help='live-migration allow disk overcommit'),
cfg.BoolOpt('block_migration', default=False,
help='live-migration without shared_storage'),
cfg.StrOpt('host_eph_drv', default='-',
help='host ephemeral drive'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for db connection'),
cfg.StrOpt('host', default='',
help='compute mysql node ip address'),
cfg.StrOpt('database_name', default='',
help='compute database name'),
cfg.StrOpt('user', default='',
help='user for db access'),
cfg.StrOpt('password', default='',
help='password for db access'),
]
src_storage = cfg.OptGroup(name='src_storage',
title='Config service for storage')
src_storage_opts = [
cfg.StrOpt('service', default='cinder',
help='name service for storage'),
cfg.StrOpt('backend', default='iscsi',
help='backend for storage'),
cfg.StrOpt('host', default='',
help='storage node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='cinder_database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('protocol_transfer', default='GLANCE',
help='mode transporting volumes GLANCE or SSH'),
cfg.StrOpt('disk_format', default='qcow2',
help='convert volume'),
cfg.StrOpt('volume_name_template', default='volume-',
help='template for creating names of volumes on storage backend'),
cfg.StrOpt('rbd_pool', default='volumes',
help='name of pool for volumes in Ceph RBD storage'),
cfg.StrOpt('snapshot_name_template', default='snapshot-',
help='template for creating names of snapshots on storage backend')
]
src_image = cfg.OptGroup(name='src_image',
title='Config service for images')
src_image_opts = [
cfg.StrOpt('service', default='glance',
help='name service for images'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('host', default='',
help='glance mysql node ip address'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='glance database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('backend', default='file',
help='backend for images')
]
src_identity = cfg.OptGroup(name='src_identity',
title='Config service for identity')
src_identity_opts = [
cfg.StrOpt('service', default='keystone',
help='name service for keystone')
]
src_network = cfg.OptGroup(name='src_network',
title='Config service for network')
src_network_opts = [
cfg.StrOpt('service', default='auto',
help='name service for network, '
'auto - detect available service')
]
src_objstorage = cfg.OptGroup(name='src_objstorage',
title='Config service for object storage')
src_objstorage_opts = [
cfg.StrOpt('service', default='swift',
help='service name for object storage')
]
dst_mysql = cfg.OptGroup(name='dst_mysql',
title='Config mysql for destination cloud')
dst_mysql_opts = [
cfg.StrOpt('user', default='-',
help='user for mysql'),
cfg.StrOpt('password', default='-',
help='password for mysql'),
cfg.StrOpt('host', default='-',
help='host of mysql'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
]
dst_rabbit = cfg.OptGroup(name='dst_rabbit',
title='Config RabbitMQ for destination cloud')
dst_rabbit_opts = [
cfg.StrOpt('user', default='guest',
help='user for RabbitMQ'),
cfg.StrOpt('password', default='guest',
help='password for RabbitMQ'),
cfg.StrOpt('hosts', default='-',
help='comma separated RabbitMQ hosts')
]
dst_compute = cfg.OptGroup(name='dst_compute',
title='Config service for compute')
dst_compute_opts = [
cfg.StrOpt('service', default='nova',
help='name service for compute'),
cfg.StrOpt('backend', default='ceph',
help='backend for ephemeral drives'),
cfg.StrOpt('convert_diff_file', default='qcow2',
help='convert diff file to'),
cfg.StrOpt('convert_ephemeral_disk', default='qcow2',
help='convert ephemeral disk to'),
cfg.BoolOpt('disk_overcommit', default=False,
help='live-migration allow disk overcommit'),
cfg.BoolOpt('block_migration', default=False,
help='live-migration without shared_storage'),
cfg.StrOpt('host_eph_drv', default='-',
help='host ephemeral drive'),
cfg.FloatOpt('cpu_allocation_ratio', default=16.0,
help='cpu allocation ratio'),
cfg.FloatOpt('ram_allocation_ratio', default=1.0,
help='ram allocation ratio'),
cfg.FloatOpt('disk_allocation_ratio', default=0.9,
help='disk allocation ratio'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for db connection'),
cfg.StrOpt('host', default='',
help='compute mysql node ip address'),
cfg.StrOpt('database_name', default='',
help='compute database name'),
cfg.StrOpt('user', default='',
help='user for db access'),
cfg.StrOpt('password', default='',
help='password for db access'),
]
dst_storage = cfg.OptGroup(name='dst_storage',
title='Config service for storage')
dst_storage_opts = [
cfg.StrOpt('service', default='cinder',
help='name service for storage'),
cfg.StrOpt('backend', default='iscsi',
help='backend for storage'),
cfg.StrOpt('host', default='',
help='storage node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='cinder_database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('protocol_transfer', default='GLANCE',
help='mode transporting volumes GLANCE or SSH'),
cfg.StrOpt('disk_format', default='qcow2',
help='convert volume'),
cfg.StrOpt('volume_name_template', default='volume-',
help='template for creating names of volumes on storage backend'),
cfg.StrOpt('rbd_pool', default='volumes',
help='name of pool for volumes in Ceph RBD storage'),
cfg.StrOpt('snapshot_name_template', default='snapshot-',
help='template for creating names of snapshots on storage backend')
]
dst_image = cfg.OptGroup(name='dst_image',
title='Config service for images')
dst_image_opts = [
cfg.StrOpt('service', default='glance',
help='name service for images'),
cfg.BoolOpt('convert_to_raw', default=True,
help='convert images to raw format'),
cfg.StrOpt('host', default='',
help='glance mysql node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='glance database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('backend', default='file',
help='backend for images')
]
dst_identity = cfg.OptGroup(name='dst_identity',
title='Config service for identity')
dst_identity_opts = [
cfg.StrOpt('service', default='keystone',
help='name service for keystone')
]
dst_network = cfg.OptGroup(name='dst_network',
title='Config service for network')
dst_network_opts = [
cfg.StrOpt('service', default='auto',
help='name service for network, '
'auto - detect available service'),
cfg.ListOpt('interfaces_for_instance', default=['net04'],
help='list of interfaces for connecting to instances')
]
dst_objstorage = cfg.OptGroup(name='dst_objstorage',
title='Config service for object storage')
dst_objstorage_opts = [
cfg.StrOpt('service', default='swift',
help='service name for object storage')
]
import_rules = cfg.OptGroup(name='import_rules',
title='Import rules for '
'overwriting certain fields')
import_rules_opts = [
cfg.StrOpt('key', default='',
help=''),
]
snapshot = cfg.OptGroup(name='snapshot',
title="Rules for snapshot")
snapshot_opts = [
cfg.StrOpt('snapshot_path', default="dump.sql")]
initial_check = cfg.OptGroup(name='initial_check',
title='Configuration for initial checks')
initial_check_opts = [
cfg.IntOpt('claimed_bandwidth', default=100,
help='Claimed bandwidth of network (Mb/s).'),
cfg.FloatOpt('factor', default=0.5,
help='The percentage of the allowable loss of network speed'),
cfg.IntOpt('test_file_size', default=100,
help='Size of testing file to send/receive via network (MB).'),
]
condense = cfg.OptGroup(name='condense',
title="options for condensation")
condense_opts = [
cfg.FloatOpt('ram_reduction_coef', default=1),
cfg.FloatOpt('core_reduction_coef', default=4),
cfg.StrOpt('flavors_file', default='flavors.json'),
cfg.StrOpt('nodes_file', default='nodes.json'),
cfg.StrOpt('vms_file', default='vms.json'),
cfg.StrOpt('group_file', default='groups.yaml'),
cfg.BoolOpt('keep_interim_data', default=False,
help=("Stores interim data required for the condensation "
"process to run in files defined in `flavors_file`, "
"`nodes_file`, and `group_file` config options.")),
cfg.IntOpt('precision', default=85)]
database = cfg.OptGroup(name="database",
title="options for database")
database_opts = [
cfg.StrOpt("host", default="localhost"),
cfg.IntOpt("port", default=6379)]
cfg_for_reg = [
(src, src_opts),
(dst, dst_opts),
(migrate, migrate_opts),
(mail, mail_opts),
(src_mysql, src_mysql_opts),
(src_rabbit, src_rabbit_opts),
(src_compute, src_compute_opts),
(src_storage, src_storage_opts),
(src_identity, src_identity_opts),
(src_image, src_image_opts),
(src_network, src_network_opts),
(src_objstorage, src_objstorage_opts),
(dst_mysql, dst_mysql_opts),
(dst_rabbit, dst_rabbit_opts),
(dst_compute, dst_compute_opts),
(dst_storage, dst_storage_opts),
(dst_identity, dst_identity_opts),
(dst_image, dst_image_opts),
(dst_network, dst_network_opts),
(dst_objstorage, dst_objstorage_opts),
(snapshot, snapshot_opts),
(import_rules, import_rules_opts),
(initial_check, initial_check_opts),
(condense, condense_opts),
(database, database_opts),
]
CONF = cfg.CONF
name_configs = ['configs/config.ini']
def init_config(name_config=None):
for i in cfg_for_reg:
CONF.register_group(i[0])
CONF.register_opts(i[1], i[0])
if name_config:
name_configs[0] = name_config
CONF(default_config_files=name_configs, args="")
def get_plugins():
plugins = addons
dir_plugins = dir(plugins)
exclude_field = ['__author__', '__builtins__', '__doc__', '__file__',
'__name__', '__package__', '__path__']
plugins = [(item, plugins.__dict__[item])
for item in dir_plugins if item not in exclude_field]
return plugins
def find_group(group):
for g in xrange(len(cfg_for_reg)):
if group.name == cfg_for_reg[g][0].name:
return g
return -1
def find_field(field, fields):
for g in xrange(len(fields)):
if field.name == fields[g].name:
return g
return -1
def merge_fields(index_pair, fields):
for field in fields:
index_field = find_field(field, cfg_for_reg[index_pair][1])
if index_field >= 0:
cfg_for_reg[index_pair][1][index_field] = field
else:
cfg_for_reg[index_pair][1].append(field)
def merge_cfg(cfg):
for pair in cfg:
index_pair = find_group(pair[0])
if index_pair == -1:
cfg_for_reg.append(pair)
else:
merge_fields(index_pair, pair[1])
def collector_configs_plugins():
plugins = get_plugins()
for plugin in plugins:
merge_cfg(plugin[1].cfg_for_reg)
name_configs.append('addons/%s/configs/config.ini' % plugin[0])
if __name__ == '__main__':
collector_configs_plugins()
init_config()
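# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): once the groups
# above are registered via init_config() (which expects configs/config.ini to
# exist), options are read as CONF.<group>.<option>.  The option names used
# below come from the definitions above.
def _config_usage_example():
    collector_configs_plugins()
    init_config()
    return (CONF.src.auth_url,
            CONF.migrate.scenario,
            CONF.dst_compute.cpu_allocation_ratio)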
|
|
#!/usr/bin/env python
"""
This class implements the Quine-McCluskey algorithm for minimization of boolean
functions.
Based on code from Robert Dick <[email protected]> and Pat Maupin
<[email protected]>. Most of the original code was re-written for performance
reasons.
>>> qm = QM(['A','B'])
>>> qm.get_function(qm.solve([])[1])
'0'
>>> qm.get_function(qm.solve([1,3],[0,2])[1])
'1'
>>> qm.get_function(qm.solve([0,1,2,3])[1])
'1'
>>> qm.get_function(qm.solve([3])[1])
'(A AND B)'
>>> qm.get_function(qm.solve([0])[1])
'((NOT A) AND (NOT B))'
>>> qm.get_function(qm.solve([1,3])[1])
'A'
>>> qm.get_function(qm.solve([1],[3])[1])
'A'
>>> qm.get_function(qm.solve([2,3])[1])
'B'
>>> qm.get_function(qm.solve([0,2])[1])
'(NOT A)'
>>> qm.get_function(qm.solve([0,1])[1])
'(NOT B)'
>>> qm.get_function(qm.solve([1,2,3])[1])
'(A OR B)'
>>> qm.get_function(qm.solve([0,1,2])[1])
'((NOT B) OR (NOT A))'
"""
class QM:
def __init__(self, variables):
"""
Initialize the Quine-McCluskey solver.
variables: a list of strings that are the names of the variables used in
the boolean functions
"""
self.variables = variables
self.numvars = len(variables)
def solve(self, ones, dont_care=[]):
"""
Executes the Quine-McCluskey algorithm and returns its results.
ones: a list of indices for the minterms for which the function evaluates
to 1
dont_care: a list of indices for the minterms for which we do not care about the
function evaluation
returns: a tuple a,b; a is the complexity of the result and b is a list of
minterms which is the minified boolean function expressed as a sum of
products
"""
# Handle special case for functions that always evaluate to True or
# False.
if len(ones) == 0:
return 0, '0'
if len(ones) + len(dont_care) == 1 << self.numvars:
return 0, '1'
primes = self.compute_primes(ones + dont_care)
return self.unate_cover(list(primes), ones)
def compute_primes(self, cubes):
"""
Find all prime implicants of the function.
cubes: a list of indices for the minterms for which the function evaluates
to 1 or don't-care.
"""
sigma = []
for i in range(self.numvars + 1):
sigma.append(set())
for i in cubes:
sigma[bitcount(i)].add((i, 0))
primes = set()
while sigma:
nsigma = []
redundant = set()
for c1, c2 in zip(sigma[:-1], sigma[1:]):
nc = set()
for a in c1:
for b in c2:
m = merge(a, b)
if m is not None:
nc.add(m)
redundant |= set([a, b])
nsigma.append(nc)
primes |= set(c for cubes in sigma for c in cubes) - redundant
sigma = nsigma
return primes
def unate_cover(self, primes, ones):
"""
Use the prime implicants to find the essential prime implicants of the
function, as well as other prime implicants that are necessary to cover
the function. This method uses Petrick's method, which is a technique
for determining all minimum sum-of-products solutions from a prime implicant
chart.
primes: the prime implicants that we want to minimize.
ones: a list of indices for the minterms for which we want the function to
evaluate to 1.
"""
chart = []
for one in ones:
column = []
for i in range(len(primes)):
if (one & (~primes[i][1])) == primes[i][0]:
column.append(i)
chart.append(column)
covers = []
if len(chart) > 0:
covers = [set([i]) for i in chart[0]]
for i in range(1, len(chart)):
new_covers = []
for cover in covers:
for prime_index in chart[i]:
x = set(cover)
x.add(prime_index)
append = True
for j in range(len(new_covers) - 1, -1, -1):
if x <= new_covers[j]:
del new_covers[j]
elif x > new_covers[j]:
append = False
if append:
new_covers.append(x)
covers = new_covers
min_complexity = 99999999
for cover in covers:
primes_in_cover = [primes[prime_index] for prime_index in cover]
complexity = self.calculate_complexity(primes_in_cover)
if complexity < min_complexity:
min_complexity = complexity
result = primes_in_cover
return min_complexity, result
def calculate_complexity(self, minterms):
"""
Calculate the complexity of the given function. The complexity is calculated
based on the following rules:
A NOT gate adds 1 to the complexity.
A n-input AND or OR gate adds n to the complexity.
minterms: a list of minterms that form the function
returns: an integer that is the complexity of the function
>>> qm = QM(['A','B','C'])
>>> qm.calculate_complexity([(1,6)])
0
>>> qm.calculate_complexity([(0,6)])
1
>>> qm.calculate_complexity([(3,4)])
2
>>> qm.calculate_complexity([(7,0)])
3
>>> qm.calculate_complexity([(1,6),(2,5),(4,3)])
3
>>> qm.calculate_complexity([(0,6),(2,5),(4,3)])
4
>>> qm.calculate_complexity([(0,6),(0,5),(4,3)])
5
>>> qm.calculate_complexity([(0,6),(0,5),(0,3)])
6
>>> qm.calculate_complexity([(3,4),(7,0),(5,2)])
10
>>> qm.calculate_complexity([(1,4),(7,0),(5,2)])
11
>>> qm.calculate_complexity([(2,4),(7,0),(5,2)])
11
>>> qm.calculate_complexity([(0,4),(7,0),(5,2)])
12
>>> qm.calculate_complexity([(0,4),(0,0),(5,2)])
15
>>> qm.calculate_complexity([(0,4),(0,0),(0,2)])
17
"""
complexity = len(minterms)
if complexity == 1:
complexity = 0
mask = (1 << self.numvars) - 1
for minterm in minterms:
masked = ~minterm[1] & mask
term_complexity = bitcount(masked)
if term_complexity == 1:
term_complexity = 0
complexity += term_complexity
complexity += bitcount(~minterm[0] & masked)
return complexity
def get_function(self, minterms):
"""
Return in human readable form a sum of products function.
minterms: a list of minterms that form the function
returns: a string that represents the function using operators AND, OR and
NOT.
"""
if isinstance(minterms, str):
return minterms
def parentheses(glue, array):
if len(array) > 1:
return ''.join(['(', glue.join(array), ')'])
else:
return glue.join(array)
or_terms = []
for minterm in minterms:
and_terms = []
for j in range(len(self.variables)):
if minterm[0] & 1 << j:
and_terms.append(self.variables[j])
elif not minterm[1] & 1 << j:
and_terms.append('(NOT %s)' % self.variables[j])
or_terms.append(parentheses(' AND ', and_terms))
return parentheses(' OR ', or_terms)
def bitcount(i):
""" Count set bits of the input. """
res = 0
while i > 0:
res += i & 1
i >>= 1
return res
def is_power_of_two_or_zero(x):
"""
Determine if an input is zero or a power of two. Alternatively, determine if an
input has at most 1 bit set.
"""
return (x & (~x + 1)) == x
def merge(i, j):
""" Combine two minterms. """
if i[1] != j[1]:
return None
y = i[0] ^ j[0]
if not is_power_of_two_or_zero(y):
return None
return (i[0] & j[0], i[1] | y)
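# --------------------------------------------------------------------------
# Illustrative note (not part of the original module): terms are encoded as
# (value, dont_care_mask) pairs.  With variables ['A', 'B'], the prime that
# covers minterms 1 and 3 is merge((1, 0), (3, 0)) == (1, 2): bit 0 is fixed
# to 1 (A), bit 1 is a don't-care (B), so get_function() renders it as 'A'.
def _qm_example():
    qm = QM(['A', 'B'])
    complexity, terms = qm.solve([1, 3])   # f(A, B) is 1 on minterms 1 and 3
    return complexity, qm.get_function(terms)   # -> (0, 'A')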
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry points for YAPF.
The main APIs that YAPF exposes to drive the reformatting.
FormatFile(): reformat a file.
FormatCode(): reformat a string of code.
These APIs have some common arguments:
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
print_diff: (bool) Instead of returning the reformatted source, return a
diff that turns the unformatted source into the reformatted source.
verify: (bool) True if reformatted code should be verified for syntax.
"""
import difflib
import re
import sys
from lib2to3.pgen2 import tokenize
from yapf.yapflib import blank_line_calculator
from yapf.yapflib import comment_splicer
from yapf.yapflib import continuation_splicer
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_unwrapper
from yapf.yapflib import pytree_utils
from yapf.yapflib import reformatter
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import subtype_assigner
def FormatFile(filename,
style_config=None,
lines=None,
print_diff=False,
verify=True,
in_place=False,
logger=None):
"""Format a single Python file and return the formatted code.
Arguments:
filename: (unicode) The file to reformat.
in_place: (bool) If True, write the reformatted code back to the file.
logger: (io streamer) A stream to output logging.
remaining arguments: see comment at the top of this module.
Returns:
Pair of (reformatted_code, encoding). reformatted_code is None if the file
is successfully written to (having used in_place). reformatted_code is a
diff if print_diff is True.
Raises:
IOError: raised if there was an error reading the file.
ValueError: raised if in_place and print_diff are both specified.
"""
_CheckPythonVersion()
if in_place and print_diff:
raise ValueError('Cannot pass both in_place and print_diff.')
original_source, encoding = ReadFile(filename, logger)
reformatted_source = FormatCode(original_source,
style_config=style_config,
filename=filename,
lines=lines,
print_diff=print_diff,
verify=verify)
if in_place:
with py3compat.open_with_encoding(filename,
mode='w',
encoding=encoding) as fd:
fd.write(reformatted_source)
return None, encoding
return reformatted_source, encoding
def FormatCode(unformatted_source,
filename='<unknown>',
style_config=None,
lines=None,
print_diff=False,
verify=True):
"""Format a string of Python code.
This provides an alternative entry point to YAPF.
Arguments:
unformatted_source: (unicode) The code to format.
filename: (unicode) The name of the file being reformatted.
remaining arguments: see comment at the top of this module.
Returns:
The code reformatted to conform to the desired formatting style.
"""
_CheckPythonVersion()
style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))
if not unformatted_source.endswith('\n'):
unformatted_source += '\n'
tree = pytree_utils.ParseCodeToTree(unformatted_source)
# Run passes on the tree, modifying it in place.
comment_splicer.SpliceComments(tree)
continuation_splicer.SpliceContinuations(tree)
subtype_assigner.AssignSubtypes(tree)
split_penalty.ComputeSplitPenalties(tree)
blank_line_calculator.CalculateBlankLines(tree)
uwlines = pytree_unwrapper.UnwrapPyTree(tree)
for uwl in uwlines:
uwl.CalculateFormattingInformation()
_MarkLinesToFormat(uwlines, lines)
reformatted_source = reformatter.Reformat(uwlines, verify)
if unformatted_source == reformatted_source:
return '' if print_diff else reformatted_source
code_diff = _GetUnifiedDiff(unformatted_source, reformatted_source,
filename=filename)
if print_diff:
return code_diff
return reformatted_source
def _CheckPythonVersion():
errmsg = 'yapf is only supported for Python 2.7 or 3.4+'
if sys.version_info[0] == 2:
if sys.version_info[1] < 7:
raise RuntimeError(errmsg)
elif sys.version_info[0] == 3:
if sys.version_info[1] < 4:
raise RuntimeError(errmsg)
def ReadFile(filename, logger=None):
"""Read the contents of the file.
An optional logger can be specified to emit messages to your favorite logging
stream. If specified, errors are logged before being re-raised. This is external so that it
can be used by third-party applications.
Arguments:
filename: (unicode) The name of the file.
logger: (function) A function or lambda that takes a string and emits it.
Returns:
The contents of filename.
Raises:
IOError: raised if there was an error reading the file.
"""
try:
with open(filename, 'rb') as fd:
encoding = tokenize.detect_encoding(fd.readline)[0]
except IOError as err:
if logger:
logger(err)
raise
try:
with py3compat.open_with_encoding(filename, mode='r',
encoding=encoding) as fd:
source = fd.read()
return source, encoding
except IOError as err:
if logger:
logger(err)
raise
DISABLE_PATTERN = r'^#+ +yapf: *disable$'
ENABLE_PATTERN = r'^#+ +yapf: *enable$'
def _MarkLinesToFormat(uwlines, lines):
"""Skip sections of code that we shouldn't reformat."""
if lines:
for uwline in uwlines:
uwline.disable = True
for start, end in sorted(lines):
for uwline in uwlines:
if uwline.lineno > end:
break
if uwline.lineno >= start:
uwline.disable = False
index = 0
while index < len(uwlines):
uwline = uwlines[index]
if uwline.is_comment:
if _DisableYAPF(uwline.first.value.strip()):
while index < len(uwlines):
uwline = uwlines[index]
uwline.disable = True
if uwline.is_comment and _EnableYAPF(uwline.first.value.strip()):
break
index += 1
elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE):
uwline.disable = True
index += 1
def _DisableYAPF(line):
return (re.search(DISABLE_PATTERN, line.split('\n')[0], re.IGNORECASE) or
re.search(DISABLE_PATTERN, line.split('\n')[-1], re.IGNORECASE))
def _EnableYAPF(line):
return (re.search(ENABLE_PATTERN, line.split('\n')[0], re.IGNORECASE) or
re.search(ENABLE_PATTERN, line.split('\n')[-1], re.IGNORECASE))
def _GetUnifiedDiff(before, after, filename='code'):
"""Get a unified diff of the changes.
Arguments:
before: (unicode) The original source code.
after: (unicode) The reformatted source code.
filename: (unicode) The code's filename.
Returns:
The unified diff text.
"""
before = before.splitlines()
after = after.splitlines()
return '\n'.join(difflib.unified_diff(before, after, filename, filename,
'(original)', '(reformatted)',
lineterm='')) + '\n'
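# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), using only the
# entry points defined above.  FormatCode() returns the reformatted source,
# or a unified diff when print_diff=True; the snippet and style name below
# are assumptions for the example.
def _format_code_example():
    messy = 'x = {  "a":37,"b":42,\n\n    "c":927}\n'
    return FormatCode(messy, style_config='pep8')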
|
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Sinkhorn divergence."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.test_util
from ott.geometry import geometry
from ott.geometry import pointcloud
from ott.tools import sinkhorn_divergence
class SinkhornDivergenceTest(jax.test_util.JaxTestCase):
def setUp(self):
super().setUp()
self.rng = jax.random.PRNGKey(0)
self._dim = 4
self._num_points = 13, 17
self.rng, *rngs = jax.random.split(self.rng, 3)
a = jax.random.uniform(rngs[0], (self._num_points[0],))
b = jax.random.uniform(rngs[1], (self._num_points[1],))
self._a = a / jnp.sum(a)
self._b = b / jnp.sum(b)
def test_euclidean_point_cloud(self):
rngs = jax.random.split(self.rng, 2)
x = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
y = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
geometry_xx = pointcloud.PointCloud(x, x, epsilon=0.01)
geometry_xy = pointcloud.PointCloud(x, y, epsilon=0.01)
geometry_yy = pointcloud.PointCloud(y, y, epsilon=0.01)
div = sinkhorn_divergence._sinkhorn_divergence(
geometry_xy,
geometry_xx,
geometry_yy,
self._a,
self._b,
threshold=1e-2)
self.assertGreater(div.divergence, 0.0)
self.assertLen(div.potentials, 3)
# Test symmetric setting,
# test that symmetric evaluation converges earlier/better.
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud, x, x, epsilon=1e-1,
sinkhorn_kwargs={'inner_iterations': 1})
self.assertAllClose(div.divergence, 0.0, rtol=1e-5, atol=1e-5)
iters_xx = jnp.sum(div.errors[0] > 0)
iters_xx_sym = jnp.sum(div.errors[1] > 0)
self.assertGreater(iters_xx, iters_xx_sym)
def test_euclidean_autoepsilon(self):
rngs = jax.random.split(self.rng, 2)
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a, cloud_b,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertGreater(div.divergence, 0.0)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
self.assertAllClose(div.geoms[0].epsilon, div.geoms[1].epsilon)
def test_euclidean_autoepsilon_not_share_epsilon(self):
rngs = jax.random.split(self.rng, 2)
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a, cloud_b,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2), share_epsilon=False)
self.assertGreater(jnp.abs(div.geoms[0].epsilon - div.geoms[1].epsilon), 0)
def test_euclidean_point_cloud_wrapper(self):
rngs = jax.random.split(self.rng, 2)
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a, cloud_b, epsilon=0.1,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertGreater(div.divergence, 0.0)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
def test_euclidean_point_cloud_wrapper_no_weights(self):
rngs = jax.random.split(self.rng, 2)
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a, cloud_b, epsilon=0.1,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertGreater(div.divergence, 0.0)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
def test_euclidean_point_cloud_unbalanced_wrapper(self):
rngs = jax.random.split(self.rng, 2)
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a, cloud_b, epsilon=0.1,
a=self._a +.001, b=self._b +.002,
sinkhorn_kwargs=dict(threshold=1e-2, tau_a=0.8, tau_b=0.9))
self.assertGreater(div.divergence, 0.0)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
def test_generic_point_cloud_wrapper(self):
rngs = jax.random.split(self.rng, 2)
x = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
y = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
# Tests with 3 cost matrices passed as args
cxy = jnp.sum(jnp.abs(x[:, jnp.newaxis] - y[jnp.newaxis, :])**2, axis=2)
cxx = jnp.sum(jnp.abs(x[:, jnp.newaxis] - x[jnp.newaxis, :])**2, axis=2)
cyy = jnp.sum(jnp.abs(y[:, jnp.newaxis] - y[jnp.newaxis, :])**2, axis=2)
div = sinkhorn_divergence.sinkhorn_divergence(
geometry.Geometry,
cxy, cxx, cyy, epsilon=0.1,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertIsNotNone(div.divergence)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
# Tests with 2 cost matrices passed as args
div = sinkhorn_divergence.sinkhorn_divergence(
geometry.Geometry,
cxy, cxx, epsilon=0.1,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertIsNotNone(div.divergence)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
# Tests with 3 cost matrices passed as kwargs
div = sinkhorn_divergence.sinkhorn_divergence(
geometry.Geometry,
cost_matrix=(cxy, cxx, cyy), epsilon=0.1,
a=self._a, b=self._b,
sinkhorn_kwargs=dict(threshold=1e-2))
self.assertIsNotNone(div.divergence)
self.assertLen(div.potentials, 3)
self.assertLen(div.geoms, 3)
@parameterized.parameters([True, False])
def test_segment_sinkhorn_result(self, shuffle):
# Test that segmented sinkhorn gives the same results:
rngs = jax.random.split(self.rng, 4)
x = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
y = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
geom_kwargs = dict(epsilon=0.01)
sinkhorn_kwargs = dict(threshold=1e-2)
true_divergence = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
x,
y,
a=self._a,
b=self._b,
sinkhorn_kwargs=sinkhorn_kwargs,
**geom_kwargs).divergence
if shuffle:
# Now, shuffle the order of both arrays, but
# still maintain the segment assignments:
idx_x = jax.random.shuffle(rngs[2], jnp.arange(x.shape[0] * 2))
idx_y = jax.random.shuffle(rngs[3], jnp.arange(y.shape[0] * 2))
else:
idx_x = jnp.arange(x.shape[0] * 2)
idx_y = jnp.arange(y.shape[0] * 2)
# Duplicate arrays:
x_copied = jnp.concatenate((x, x))[idx_x]
a_copied = jnp.concatenate((self._a, self._a))[idx_x]
segment_ids_x = jnp.arange(2).repeat(x.shape[0])[idx_x]
y_copied = jnp.concatenate((y, y))[idx_y]
b_copied = jnp.concatenate((self._b, self._b))[idx_y]
segment_ids_y = jnp.arange(2).repeat(y.shape[0])[idx_y]
segmented_divergences = sinkhorn_divergence.segment_sinkhorn_divergence(
x_copied,
y_copied,
segment_ids_x=segment_ids_x,
segment_ids_y=segment_ids_y,
indices_are_sorted=False,
weights_x=a_copied,
weights_y=b_copied,
sinkhorn_kwargs=sinkhorn_kwargs,
**geom_kwargs)
self.assertArraysAllClose(
true_divergence.repeat(2), segmented_divergences)
def test_segment_sinkhorn_different_segment_sizes(self):
# Test other array sizes
x1 = jnp.arange(10)[:, None].repeat(2, axis=1)
y1 = jnp.arange(11)[:, None].repeat(2, axis=1) + 0.1
# Should have larger divergence since further apart:
x2 = jnp.arange(12)[:, None].repeat(2, axis=1)
y2 = 2 * jnp.arange(13)[:, None].repeat(2, axis=1) + 0.1
segmented_divergences = sinkhorn_divergence.segment_sinkhorn_divergence(
jnp.concatenate((x1, x2)),
jnp.concatenate((y1, y2)),
num_per_segment_x=jnp.array([10, 12]),
num_per_segment_y=jnp.array([11, 13]),
epsilon=0.01)
self.assertEqual(segmented_divergences.shape[0], 2)
self.assertGreater(segmented_divergences[1], segmented_divergences[0])
true_divergences = jnp.array([
sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud, x, y, epsilon=0.01).divergence
for x, y in zip((x1, x2), (y1, y2))
])
self.assertArraysAllClose(segmented_divergences, true_divergences)
@parameterized.parameters(
[dict(anderson_acceleration=3), 1e-2],
[dict(anderson_acceleration=6), None],
[dict(chg_momentum_from=20), 1e-3],
[dict(chg_momentum_from=30), None],
[dict(momentum=1.05), 1e-3],
[dict(momentum=1.01), None])
def test_euclidean_momentum_params(self, sinkhorn_kwargs, epsilon):
# check if sinkhorn divergence sinkhorn_kwargs parameters used for
# momentum/Anderson are properly overridden for the symmetric (x,x) and
# (y,y) parts.
rngs = jax.random.split(self.rng, 2)
threshold = 3.2e-3
cloud_a = jax.random.uniform(rngs[0], (self._num_points[0], self._dim))
cloud_b = jax.random.uniform(rngs[1], (self._num_points[1], self._dim))
div = sinkhorn_divergence.sinkhorn_divergence(
pointcloud.PointCloud,
cloud_a,
cloud_b,
epsilon=epsilon,
a=self._a,
b=self._b,
sinkhorn_kwargs=dict(sinkhorn_kwargs, threshold=threshold))
self.assertGreater(threshold, div.errors[0][-1])
self.assertGreater(threshold, div.errors[1][-1])
self.assertGreater(threshold, div.errors[2][-1])
self.assertGreater(div.divergence, 0.0)
if __name__ == '__main__':
absltest.main()
|
|
__author__ = 'jiataogu'
"""
The file is the implementation of Grid-LSTM
In this stage we only support 2D LSTM with Pooling.
"""
from recurrent import *
from attention import Attention
import logging
import copy
logger = logging.getLogger(__name__)
class Grid(Recurrent):
"""
Grid Cell for Grid-LSTM
===================================================
LSTM
[h', m'] = LSTM(x, h, m):
gi = sigmoid(Wi * x + Ui * h + Vi * m) # Vi is peep-hole
gf = sigmoid(Wf * x + Uf * h + Vf * m)
go = sigmoid(Wo * x + Uo * h + Vo * m)
gc = tanh(Wc * x +Uc * h)
m' = gf @ m + gi @ gc (@ represents element-wise dot.)
h' = go @ tanh(m')
===================================================
Grid
(here is an example for 2D Grid LSTM with priority dimension = 1)
-------------
| c' d' | Grid Block and Grid Updates.
| a a'|
| | [d' c'] = LSTM_d([b, d], c)
| b b'| [a' b'] = LSTM_t([b, d'], a)
| c d |
-------------
===================================================
Details please refer to:
"Grid Long Short-Term Memory", http://arxiv.org/abs/1507.01526
"""
def __init__(self,
output_dims,
input_dims, # [0, ... 0], 0 represents no external inputs.
priority=1,
peephole=True,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one',
activation='tanh', inner_activation='sigmoid',
use_input=False,
name=None, weights=None,
identity_connect=None
):
super(Grid, self).__init__()
assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'
assert len(input_dims) == len(output_dims), '# of inputs must match # of outputs.'
"""
Initialization.
"""
self.input_dims = input_dims
self.output_dims = output_dims
self.N = len(output_dims)
self.priority = priority
self.peephole = peephole
self.use_input = use_input
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.identity_connect = identity_connect
self.axies = {0: 'x', 1: 'y', 2: 'z', 3: 'w'} # only support at most 4D now!
"""
Others info.
"""
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def build(self):
"""
Build the model weights
"""
logger.info("Building GridPool-LSTM !!")
self.W = dict()
self.U = dict()
self.V = dict()
self.b = dict()
# ******************************************************************************************
for k in xrange(self.N): # N-Grids (for 2 dimensions, 0 is for time; 1 is for depth.)
axis = self.axies[k]
# input layers:
if self.input_dims[k] > 0 and self.use_input:
# use the data information.
self.W[axis + '#i'], self.W[axis + '#f'], \
self.W[axis + '#o'], self.W[axis + '#c'] \
= [self.init((self.input_dims[k], self.output_dims[k])) for _ in xrange(4)]
# hidden layers:
for j in xrange(self.N): # every hidden states inputs.
pos = self.axies[j]
if k == j:
self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \
self.U[axis + pos + '#o'], self.U[axis + pos + '#c'] \
= [self.inner_init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]
else:
self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \
self.U[axis + pos + '#o'], self.U[axis + pos + '#c'] \
= [self.init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]
# bias layers:
self.b[axis + '#i'], self.b[axis + '#o'], self.b[axis + '#c'] \
= [shared_zeros(self.output_dims[k]) for _ in xrange(3)]
self.b[axis + '#f'] = self.forget_bias_init(self.output_dims[k])
# peep-hole layers:
if self.peephole:
self.V[axis + '#i'], self.V[axis + '#f'], self.V[axis + '#o'] \
= [self.init(self.output_dims[k]) for _ in xrange(3)]
# *****************************************************************************************
# set names for these weights
for A, n in zip([self.W, self.U, self.b, self.V], ['W', 'U', 'b', 'V']):
for w in A:
A[w].name = n + '_' + w
# set parameters
self.params = [self.W[s] for s in self.W] + \
[self.U[s] for s in self.U] + \
[self.b[s] for s in self.b] + \
[self.V[s] for s in self.V]
def lstm_(self, k, H, m, x, identity=False):
"""
LSTM
[h', m'] = LSTM(x, h, m):
gi = sigmoid(Wi * x + Ui * h + Vi * m) # Vi is peep-hole
gf = sigmoid(Wf * x + Uf * h + Vf * m)
go = sigmoid(Wo * x + Uo * h + Vo * m)
gc = tanh(Wc * x +Uc * h)
m' = gf @ m + gi @ gc (@ represents element-wise dot.)
h' = go @ tanh(m')
"""
assert len(H) == self.N, 'we have to use all the hidden states in Grid LSTM'
axis = self.axies[k]
# *************************************************************************
# bias energy
ei, ef, eo, ec = [self.b[axis + p] for p in ['#i', '#f', '#o', '#c']]
# hidden energy
for j in xrange(self.N):
pos = self.axies[j]
ei += T.dot(H[j], self.U[axis + pos + '#i'])
ef += T.dot(H[j], self.U[axis + pos + '#f'])
eo += T.dot(H[j], self.U[axis + pos + '#o'])
ec += T.dot(H[j], self.U[axis + pos + '#c'])
# input energy (if any)
if self.input_dims[k] > 0 and self.use_input:
ei += T.dot(x, self.W[axis + '#i'])
ef += T.dot(x, self.W[axis + '#f'])
eo += T.dot(x, self.W[axis + '#o'])
ec += T.dot(x, self.W[axis + '#c'])
# peep-hole connections
if self.peephole:
ei += m * self.V[axis + '#i'][None, :]
ef += m * self.V[axis + '#f'][None, :]
eo += m * self.V[axis + '#o'][None, :]
# *************************************************************************
# compute the gates.
i = self.inner_activation(ei)
f = self.inner_activation(ef)
o = self.inner_activation(eo)
c = self.activation(ec)
# update the memory and hidden states.
m_new = f * m + i * c
h_new = o * self.activation(m_new)
return h_new, m_new
def grid_(self,
hs_i,
ms_i,
xs_i,
priority=1,
identity=None):
"""
===================================================
Grid (2D as an example)
-------------
| c' d' | Grid Block and Grid Updates.
| a a'|
| | [d' c'] = LSTM_d([b, d], c)
| b b'| [a' b'] = LSTM_t([b, d'], a) priority
| c d |
-------------
a = my | b = hy | c = mx | d = hx
===================================================
Currently masking is not considered in GridLSTM.
"""
# compute LSTM updates for non-priority dimensions
H_new = hs_i
M_new = ms_i
for k in xrange(self.N):
if k == priority:
continue
m = ms_i[k]
x = xs_i[k]
H_new[k], M_new[k] \
= self.lstm_(k, hs_i, m, x)
if identity is not None:
if identity[k]:
H_new[k] += hs_i[k]
# compute LSTM updates along the priority dimension
if priority >= 0:
hs_ii = H_new
H_new[priority], M_new[priority] \
= self.lstm_(priority, hs_ii, ms_i[priority], xs_i[priority])
if identity is not None:
if identity[priority]:
H_new[priority] += hs_ii[priority]
return H_new, M_new
class SequentialGridLSTM(Grid):
"""
For details, please refer to:
"Grid Long Short-Term Memory",
http://arxiv.org/abs/1507.01526
SequentialGridLSTM is a typical 2D Grid-LSTM,
which has one flexible dimension (time) and one fixed dimension (depth).
Input information is added along the x-axis.
"""
def __init__(self,
# parameters for Grid.
output_dims,
input_dims, # [0, ... 0], 0 represents no external inputs.
priority=1,
peephole=True,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one',
activation='tanh', inner_activation='sigmoid',
use_input=False,
name=None, weights=None,
identity_connect=None,
# parameters for 2D-GridLSTM
depth=5,
learn_init=False,
pooling=True,
attention=False,
shared=True,
dropout=0,
rng=None,
):
super(Grid, self).__init__()
assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'
assert len(input_dims) == len(output_dims), '# of inputs must match # of outputs.'
assert input_dims[1] == 0, 'we have no y-axis inputs here.'
assert shared, 'we share the weights in this stage.'
assert not (attention and pooling), 'attention and pooling cannot be set at the same time.'
"""
Initialization.
"""
logger.info(":::: Sequential Grid-Pool LSTM ::::")
self.input_dims = input_dims
self.output_dims = output_dims
self.N = len(output_dims)
self.depth = depth
self.dropout = dropout
self.priority = priority
self.peephole = peephole
self.use_input = use_input
self.pooling = pooling
self.attention = attention
self.learn_init = learn_init
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.relu = activations.get('relu')
self.inner_activation = activations.get(inner_activation)
self.identity_connect = identity_connect
self.axies = {0: 'x', 1: 'y', 2: 'z', 3: 'w'} # only support at most 4D now!
if self.identity_connect is not None:
logger.info('Identity Connection: {}'.format(self.identity_connect))
"""
Build the model weights.
"""
# build the centroid grid.
self.build()
# input projection layer (projected to time-axis) [x]
self.Ph = Dense(input_dims[0], output_dims[0], name='Ph')
self.Pm = Dense(input_dims[0], output_dims[0], name='Pm')
self._add(self.Ph)
self._add(self.Pm)
# learn init for depth-axis hidden states/memory cells. [y]
if self.learn_init:
self.M0 = self.init((depth, output_dims[1]))
if self.pooling:
self.H0 = self.init(output_dims[1])
else:
self.H0 = self.init((depth, output_dims[1]))
self.M0.name, self.H0.name = 'M0', 'H0'
self.params += [self.M0, self.H0]
# output layers: a projection for max-pooling, an attender for attention
if self.pooling:
self.PP = Dense(output_dims[1] + input_dims[0], output_dims[1], # init='orthogonal',
name='PP', activation='linear')
self._add(self.PP)
if self.attention:
self.A = Attention(target_dim=input_dims[0],
source_dim=output_dims[1],
hidden_dim=200, name='attender')
self._add(self.A)
# if self.dropout > 0:
# logger.info(">>>>>> USE DropOut !! <<<<<<")
# self.D = Dropout(rng=rng, p=self.dropout, name='Dropout')
"""
Other info.
"""
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self, *args):
# since depth is not determined, we cannot decide the number of inputs
# for one time step.
# if pooling is True:
# args = [raw_input] + (sequence)
# [hy] + [my]*depth (output_info)
#
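# e.g. with depth == 3 the scan step receives
#     args = [x_t, hy_1, hy_2, hy_3, my_1, my_2, my_3]
# which is unpacked below into the input and the per-layer y-axis states.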
inputs = args[0]
Hy_tm1 = [args[k] for k in range(1, 1 + self.depth)]
My_tm1 = [args[k] for k in range(1 + self.depth, 1 + 2 * self.depth)]
# x_axis input projection (get hx_t, mx_t)
hx_t = self.Ph(inputs) # (nb_samples, output_dim0)
mx_t = self.Pm(inputs) # (nb_samples, output_dim0)
# build computation path from bottom to top.
Hx_t = [hx_t]
Mx_t = [mx_t]
Hy_t = []
My_t = []
for d in xrange(self.depth):
hs_i = [Hx_t[-1], Hy_tm1[d]]
ms_i = [Mx_t[-1], My_tm1[d]]
xs_i = [inputs, T.zeros_like(inputs)]
hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=self.priority, identity=self.identity_connect)
Hx_t += [hs_o[0]]
Hy_t += [hs_o[1]]
Mx_t += [ms_o[0]]
My_t += [ms_o[1]]
hx_out = Hx_t[-1]
mx_out = Mx_t[-1]
# get the output (output_y, output_x)
# MAX-Pooling
if self.pooling:
# hy_t = T.max([self.PP(hy) for hy in Hy_t], axis=0)
hy_t = T.max([self.PP(T.concatenate([hy, inputs], axis=-1)) for hy in Hy_t], axis=0)
Hy_t = [hy_t] * self.depth
if self.attention:
HHy_t = T.concatenate([hy[:, None, :] for hy in Hy_t], axis=1) # (nb_samples, n_depth, out_dim1)
annotation = self.A(inputs, HHy_t) # (nb_samples, n_depth)
hy_t = T.sum(HHy_t * annotation[:, :, None], axis=1) # (nb_samples, out_dim1)
Hy_t = [hy_t] * self.depth
R = Hy_t + My_t + [hx_out, mx_out]
return tuple(R)
def __call__(self, X, init_H=None, init_M=None,
return_sequence=False, one_step=False,
return_info='hy', train=True):
# It is training/testing path
self.train = train
# masking is not supported yet.
if X.ndim == 2:
X = X[:, None, :]
# one step
if one_step:
assert init_H is not None, 'previous state must be provided!'
assert init_M is not None, 'previous cell must be provided!'
X = X.dimshuffle((1, 0, 2))
if init_H is None:
if self.learn_init:
init_m = T.repeat(self.M0[:, None, :], X.shape[1], axis=1)
if self.pooling:
init_h = T.repeat(self.H0[None, :], self.depth, axis=0)
else:
init_h = self.H0
init_h = T.repeat(init_h[:, None, :], X.shape[1], axis=1)
init_H = []
init_M = []
for j in xrange(self.depth):
init_H.append(init_h[j])
init_M.append(init_m[j])
else:
init_H = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth
init_M = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth
pass
# computational graph !
if not one_step:
sequences = [X]
outputs_info = init_H + init_M + [None, None]
outputs, _ = theano.scan(
self._step,
sequences=sequences,
outputs_info=outputs_info
)
else:
outputs = self._step(*([X[0]] + init_H + init_M))
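# Layout of `outputs` (mirrors outputs_info / the return value of _step):
#   outputs[0:depth]         -> y-axis hidden states per layer (Hy)
#   outputs[depth:2*depth]   -> y-axis memory cells per layer (My)
#   outputs[-2], outputs[-1] -> top-of-stack x-axis hidden state / memory (hx, mx)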
if return_info == 'hx':
if return_sequence:
return outputs[-2].dimshuffle((1, 0, 2))  # the x-axis hidden state sequence
return outputs[-2][-1]
elif return_info == 'hy':
assert self.pooling or self.attention, 'y-axis hidden states are only used in the "Pooling Mode".'
if return_sequence:
return outputs[2].dimshuffle((1, 0, 2))
return outputs[2][-1]
elif return_info == 'hxhy':
assert self.pooling or self.attention, 'y-axis hidden states are only used in the "Pooling Mode".'
if return_sequence:
return outputs[-2].dimshuffle((1, 0, 2)), outputs[2].dimshuffle((1, 0, 2)) # x-y
return outputs[-2][-1], outputs[2][-1]
class PyramidGridLSTM2D(Grid):
"""
A variant of the sequential Grid-LSTM that introduces a pyramid structure.
"""
def __init__(self,
# parameters for Grid.
output_dims,
input_dims, # [0, ... 0], 0 represents no external inputs.
priority=1,
peephole=True,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one',
activation='tanh', inner_activation='sigmoid',
use_input=True,
name=None, weights=None,
identity_connect=None,
# parameters for 2D-GridLSTM
depth=5,
learn_init=False,
shared=True,
dropout=0
):
super(Grid, self).__init__()
assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'
assert len(input_dims) == len(output_dims), '# of inputs must match # of outputs.'
assert output_dims[0] == output_dims[1], 'Here we only support square model.'
assert shared, 'we share the weights in this stage.'
assert use_input, 'use input and add them in the middle'
"""
Initialization.
"""
logger.info(":::: Sequential Grid-Pool LSTM ::::")
self.input_dims = input_dims
self.output_dims = output_dims
self.N = len(output_dims)
self.depth = depth
self.dropout = dropout
self.priority = priority
self.peephole = peephole
self.use_input = use_input
self.learn_init = learn_init
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.relu = activations.get('relu')
self.inner_activation = activations.get(inner_activation)
self.identity_connect = identity_connect
self.axies = {0: 'x', 1: 'y', 2: 'z', 3: 'w'} # only support at most 4D now!
"""
Build the model weights.
"""
# build the centroid grid.
self.build()
# # input projection layer (projected to time-axis) [x]
# self.Ph = Dense(input_dims[0], output_dims[0], name='Ph')
# self.Pm = Dense(input_dims[0], output_dims[0], name='Pm')
#
# self._add(self.Ph)
# self._add(self.Pm)
# learn init/
if self.learn_init:
self.hx0 = self.init((1, output_dims[0]))
self.hy0 = self.init((1, output_dims[1]))
self.mx0 = self.init((1, output_dims[0]))
self.my0 = self.init((1, output_dims[1]))
self.hx0.name, self.hy0.name = 'hx0', 'hy0'
self.mx0.name, self.my0.name = 'mx0', 'my0'
self.params += [self.hx0, self.hy0, self.mx0, self.my0]
"""
Other info.
"""
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self, *args):
inputs = args[0]
hx_tm1 = args[1]
mx_tm1 = args[2]
hy_tm1 = args[3]
my_tm1 = args[4]
# zero constant inputs.
pre_info = [[[T.zeros_like(hx_tm1)
for _ in xrange(self.depth)]
for _ in xrange(self.depth)]
for _ in xrange(4)] # hx, mx, hy, my
pre_inputs = [[T.zeros_like(inputs)
for _ in xrange(self.depth)]
for _ in xrange(self.depth)]
for kk in xrange(self.depth):
pre_inputs[kk][kk] = inputs
pre_info[0][0][0] = hx_tm1
pre_info[1][0][0] = mx_tm1
pre_info[2][0][0] = hy_tm1
pre_info[3][0][0] = my_tm1
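# Sweep the depth x depth grid cell by cell: cell (step_x, step_y) reads the
# hidden/memory states produced by its x- and y-predecessors and writes its
# outputs to (step_x + 1, step_y) and (step_x, step_y + 1). The external
# input is only injected on the diagonal cells (see pre_inputs above).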
for step_x in xrange(self.depth):
for step_y in xrange(self.depth):
# input hidden/memory/input information
# print pre_info[0][-1][-1], pre_info[2][-1][-1]  # debug: inspect symbolic variables while building the graph
hs_i = [pre_info[0][step_x][step_y],
pre_info[2][step_x][step_y]]
ms_i = [pre_info[1][step_x][step_y],
pre_info[3][step_x][step_y]]
xs_i = [pre_inputs[step_x][step_y],
pre_inputs[step_x][step_y]]
# compute grid-lstm
hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=-1)
# output hidden/memory information
if (step_x == self.depth - 1) and (step_y == self.depth - 1):
hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]
return hx_t, mx_t, hy_t, my_t
if step_x + 1 < self.depth:
pre_info[0][step_x + 1][step_y] = hs_o[0]
pre_info[1][step_x + 1][step_y] = ms_o[0]
if step_y + 1 < self.depth:
pre_info[2][step_x][step_y + 1] = hs_o[1]
pre_info[3][step_x][step_y + 1] = ms_o[1]
def __call__(self, X, init_x=None, init_y=None,
return_sequence=False, one_step=False):
# masking is not supported yet.
if X.ndim == 2:
X = X[:, None, :]
# one step
if one_step:
assert init_x is not None, 'previous x must be provided!'
assert init_y is not None, 'previous y must be provided!'
X = X.dimshuffle((1, 0, 2))
if init_x is None:
if self.learn_init:
init_mx = T.repeat(self.mx0, X.shape[1], axis=0)
init_my = T.repeat(self.my0, X.shape[1], axis=0)
init_hx = T.repeat(self.hx0, X.shape[1], axis=0)
init_hy = T.repeat(self.hy0, X.shape[1], axis=0)
init_input = [init_hx, init_mx, init_hy, init_my]
else:
init_x = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2
init_y = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2
init_input = init_x + init_y
else:
init_input = init_x + init_y
if not one_step:
sequence = [X]
output_info = init_input
outputs, _ = theano.scan(
self._step,
sequences=sequence,
outputs_info=output_info
)
else:
outputs = self._step(*([X[0]] + init_x + init_y))
if return_sequence:
hxs = outputs[0].dimshuffle((1, 0, 2))
hys = outputs[2].dimshuffle((1, 0, 2))
hs = T.concatenate([hxs, hys], axis=-1)
return hs
else:
hx = outputs[0][-1]
hy = outputs[2][-1]
h = T.concatenate([hx, hy], axis=-1)
return h
class PyramidLSTM(Layer):
"""
A more flexible Pyramid LSTM structure!
"""
def __init__(self,
# parameters for Grid.
output_dims,
input_dims, # [0, ... 0], 0 represents no external inputs.
priority=1,
peephole=True,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one',
activation='tanh', inner_activation='sigmoid',
use_input=True,
name=None, weights=None,
identity_connect=None,
# parameters for 2D-GridLSTM
depth=5,
learn_init=False,
shared=True,
dropout=0
):
super(PyramidLSTM, self).__init__()
assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'
assert len(input_dims) == len(output_dims), '# of inputs must match # of outputs.'
assert output_dims[0] == output_dims[1], 'Here we only support square model.'
assert shared, 'we share the weights in this stage.'
assert use_input, 'use input and add them in the middle'
"""
Initialization.
"""
logger.info(":::: Sequential Grid-Pool LSTM ::::")
self.input_dims = input_dims
self.output_dims = output_dims
self.N = len(output_dims)
self.depth = depth
self.dropout = dropout
self.priority = priority
self.peephole = peephole
self.use_input = use_input
self.learn_init = learn_init
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.relu = activations.get('relu')
self.inner_activation = activations.get(inner_activation)
self.identity_connect = identity_connect
self.axies = {0: 'x', 1: 'y', 2: 'z', 3: 'w'} # only support at most 4D now!
"""
Build the model weights.
"""
# build the centroid grid (3 grid versions)
self.grids = [Grid(output_dims,
input_dims,
-1,
peephole,
init, inner_init,
forget_bias_init,
activation, inner_activation, use_input,
name='Grid*{}'.format(k)
) for k in xrange(3)]
for k in xrange(3):
self.grids[k].build()
self._add(self.grids[k])
# # input projection layer (projected to time-axis) [x]
# self.Ph = Dense(input_dims[0], output_dims[0], name='Ph')
# self.Pm = Dense(input_dims[0], output_dims[0], name='Pm')
#
# self._add(self.Ph)
# self._add(self.Pm)
# learn init/
if self.learn_init:
self.hx0 = self.init((1, output_dims[0]))
self.hy0 = self.init((1, output_dims[1]))
self.mx0 = self.init((1, output_dims[0]))
self.my0 = self.init((1, output_dims[1]))
self.hx0.name, self.hy0.name = 'hx0', 'hy0'
self.mx0.name, self.my0.name = 'mx0', 'my0'
self.params += [self.hx0, self.hy0, self.mx0, self.my0]
"""
Other info.
"""
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self, *args):
inputs = args[0]
hx_tm1 = args[1]
mx_tm1 = args[2]
hy_tm1 = args[3]
my_tm1 = args[4]
# zero constant inputs.
pre_info = [[[T.zeros_like(hx_tm1)
for _ in xrange(self.depth)]
for _ in xrange(self.depth)]
for _ in xrange(4)] # hx, mx, hy, my
pre_inputs = [[T.zeros_like(inputs)
for _ in xrange(self.depth)]
for _ in xrange(self.depth)]
for kk in xrange(self.depth):
pre_inputs[kk][kk] = inputs
pre_info[0][0][0] = hx_tm1
pre_info[1][0][0] = mx_tm1
pre_info[2][0][0] = hy_tm1
pre_info[3][0][0] = my_tm1
for step_x in xrange(self.depth):
for step_y in xrange(self.depth):
# input hidden/memory/input information
# print pre_info[0][-1][-1], pre_info[2][-1][-1]  # debug: inspect symbolic variables while building the graph
hs_i = [pre_info[0][step_x][step_y],
pre_info[2][step_x][step_y]]
ms_i = [pre_info[1][step_x][step_y],
pre_info[3][step_x][step_y]]
xs_i = [pre_inputs[step_x][step_y],
pre_inputs[step_x][step_y]]
# compute grid-lstm
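# Which of the three shared Grid blocks handles a cell depends on the
# anti-diagonal band it lies in: strictly below it (step_x + step_y + 1 < depth),
# exactly on it, or strictly above it. This is my reading of the intent;
# all three blocks are built with the same construction parameters.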
if (step_x + step_y + 1) < self.depth:
hs_o, ms_o = self.grids[0].grid_(hs_i, ms_i, xs_i, priority=-1)
elif (step_x + step_y + 1) == self.depth:
hs_o, ms_o = self.grids[1].grid_(hs_i, ms_i, xs_i, priority=-1)
else:
hs_o, ms_o = self.grids[2].grid_(hs_i, ms_i, xs_i, priority=-1)
# output hidden/memory information
if (step_x == self.depth - 1) and (step_y == self.depth - 1):
hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]
return hx_t, mx_t, hy_t, my_t
if step_x + 1 < self.depth:
pre_info[0][step_x + 1][step_y] = hs_o[0]
pre_info[1][step_x + 1][step_y] = ms_o[0]
if step_y + 1 < self.depth:
pre_info[2][step_x][step_y + 1] = hs_o[1]
pre_info[3][step_x][step_y + 1] = ms_o[1]
def __call__(self, X, init_x=None, init_y=None,
return_sequence=False, one_step=False):
# masking is not supported yet.
if X.ndim == 2:
X = X[:, None, :]
# one step
if one_step:
assert init_x is not None, 'previous x must be provided!'
assert init_y is not None, 'previous y must be provided!'
X = X.dimshuffle((1, 0, 2))
if init_x is None:
if self.learn_init:
init_mx = T.repeat(self.mx0, X.shape[1], axis=0)
init_my = T.repeat(self.my0, X.shape[1], axis=0)
init_hx = T.repeat(self.hx0, X.shape[1], axis=0)
init_hy = T.repeat(self.hy0, X.shape[1], axis=0)
init_input = [init_hx, init_mx, init_hy, init_my]
else:
init_x = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2
init_y = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2
init_input = init_x + init_y
else:
init_input = init_x + init_y
if not one_step:
sequence = [X]
output_info = init_input
outputs, _ = theano.scan(
self._step,
sequences=sequence,
outputs_info=output_info
)
else:
outputs = self._step(*([X[0]] + init_x + init_y))
if return_sequence:
hxs = outputs[0].dimshuffle((1, 0, 2))
hys = outputs[2].dimshuffle((1, 0, 2))
hs = T.concatenate([hxs, hys], axis=-1)
return hs
else:
hx = outputs[0][-1]
hy = outputs[2][-1]
h = T.concatenate([hx, hy], axis=-1)
return h
|
|
"""
RealBack RESTful API functions
"""
from django.http import JsonResponse
from django.views import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db.models import Max
from django.utils import timezone
from . import models, forms
class LectureDetails(View):
def get(self, request, pin=None, join=False):
""" Read lecture details from PIN """
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
# If join URL path is used
# AND PIN for this lecture is not stored in session
# AND current user is not the lecturer for this lecture
# THEN we count a new attendee
if join and request.session.get('lecture_pin', '') != pin and request.user != lecture.course.user:
request.session['lecture_pin'] = pin
lecture.attendee_counter += 1
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
@method_decorator(login_required)
def post(self, request, pin=None):
""" Update details for existing lecture """
# TODO remember to check if user has access (owner) to lecture
form = forms.LectureForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin, course__user=request.user)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture with PIN does not exist for this user'],
},
})
lecture.title = form.cleaned_data['title']
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
@method_decorator(login_required)
def delete(self, request, pin=None):
""" Delete lecture with pin """
# TODO remember to check if user has access (owner) to lecture
try:
lecture = models.Lecture.objects.get(pin=pin, course__user=request.user)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture with PIN does not exist for this user'],
},
})
lecture.delete()
return JsonResponse({
'success': True,
'lecture_pin': pin,
})
class LectureTopics(View):
def get(self, request, pin=None):
""" Read list of lecture topics """
topic_list = models.LectureTopic.objects.filter(lecture__pin=pin).order_by('order')
return JsonResponse({
'success': True,
'lecture': topic_list[0].lecture.as_dict() if len(topic_list) > 0 else None,
'lecture_topics': [topic.as_dict() for topic in topic_list],
})
@method_decorator(login_required)
def post(self, request, pin=None):
""" Create lecture topic """
# TODO remember to check if user has access (owner) to course
form = forms.NewLectureTopicForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin, course__user=request.user)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture with PIN does not exist for this user'],
},
})
max_topic_order = models.LectureTopic.objects.filter(lecture=lecture).aggregate(Max('order'))['order__max']
if max_topic_order is None:
# Set to -1 if no topics exist since we add 1 later
max_topic_order = -1
topic = form.save(commit=False)
topic.order = max_topic_order + 1
topic.lecture = lecture
topic.save()
return JsonResponse({
'success': True,
'lecture_topic': topic.as_dict(),
}, status=201)
# Form is invalid so return errors
return JsonResponse({
'success': False,
'errors': form.errors,
})
class LectureTopicDetails(View):
def get(self, request, pin=None, topic_id=None):
""" Read lecture topic details """
try:
topic = models.LectureTopic.objects.get(id=topic_id, lecture__pin=pin)
except models.LectureTopic.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Topic ID does not exist for this lecture'],
},
})
return JsonResponse({
'success': True,
'lecture_topic': topic.as_dict(),
})
@method_decorator(login_required)
def post(self, request, pin=None, topic_id=None):
""" Update lecture topic """
# TODO remember to check if user has access (owner) to course
form = forms.LectureTopicForm(request.POST)
if form.is_valid():
try:
topic = models.LectureTopic.objects.get(
id=topic_id,
lecture__pin=pin,
lecture__course__user=request.user
)
except models.LectureTopic.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Topic ID does not exist for this lecture and user'],
},
})
topic.title = form.cleaned_data['title']
topic.order = form.cleaned_data['order']
topic.save()
return JsonResponse({
'success': True,
'lecture_topic': topic.as_dict(),
})
# Form is invalid so return errors
return JsonResponse({
'success': False,
'errors': form.errors,
})
@method_decorator(login_required)
def delete(self, request, pin=None, topic_id=None):
""" Delete lecture topic """
# TODO remember to check if user has access (owner) to course
try:
topic = models.LectureTopic.objects.get(
id=topic_id,
lecture__pin=pin,
lecture__course__user=request.user
)
except models.LectureTopic.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Topic ID does not exist for this lecture and user'],
},
})
topic.delete()
# TODO Reorder topics after topic is deleted
return JsonResponse({
'success': True,
'lecture_topic_id': topic_id,
})
class LectureTopicUnderstanding(View):
def post(self, request, pin=None, topic_id=None):
""" Create opinion on lecture topic understanding """
form = forms.TopicUnderstandingForm(request.POST)
if form.is_valid():
try:
topic = models.LectureTopic.objects.get(id=topic_id, lecture__pin=pin)
except models.LectureTopic.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Topic ID does not exist for this lecture'],
},
})
if form.cleaned_data['understanding']:
topic.understanding += 1
else:
topic.understanding -= 1
topic.save()
return JsonResponse({
'success': True,
'lecture_topic': topic.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
class LectureTopicActive(View):
@method_decorator(login_required)
def post(self, request, pin=None, topic_id=None):
""" Set topic as active topic """
try:
topic = models.LectureTopic.objects.get(
id=topic_id,
lecture__pin=pin,
lecture__course__user=request.user
)
except models.LectureTopic.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Topic ID does not exist for this lecture and user'],
},
})
topic.lecture.active_topic_index = topic.order
topic.lecture.save()
return JsonResponse({
'success': True,
'lecture': topic.lecture.as_dict(),
})
class LectureQuestions(View):
def get(self, request, pin=None):
"""
Read list of latest questions
URL parameters:
order: [votes | latest] default: votes
"""
url_param = request.GET
sort_order = url_param.get('order', '')
allowed_orders = {'votes': ['-votes', '-timestamp'], 'latest': ['-timestamp']}
allowed_filters = {'votes': {'lecture__pin': pin, 'active': True}, 'latest': {'lecture__pin': pin}}
query_filter = allowed_filters.get(sort_order, {'lecture__pin': pin, 'active': True})
sort_order = allowed_orders.get(sort_order, ['-votes', '-timestamp'])
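# Examples: '?order=latest' returns every question for the lecture, newest
# first; '?order=votes' (or any other / missing value) falls back to the
# default of active questions sorted by votes, then recency.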
question_list = models.Question.objects.filter(**query_filter).order_by(*sort_order)
return JsonResponse({
'success': True,
'questions': [question.as_dict() for question in question_list],
})
def post(self, request, pin=None):
""" Create new question """
form = forms.QuestionForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.lecture_activity += 1
lecture.save()
question = form.save(commit=False)
question.lecture = lecture
question.save()
return JsonResponse({
'success': True,
'question': question.as_dict(),
}, status=201)
return JsonResponse({
'success': False,
'errors': form.errors,
})
class LectureQuestionVotes(View):
def post(self, request, pin=None, question_id=None):
""" Create vote on question """
try:
question = models.Question.objects.get(id=question_id, lecture__pin=pin)
except models.Question.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Question ID does not exist for this lecture'],
},
})
question.lecture.lecture_activity += 1
question.lecture.save()
question.votes += 1
question.save()
return JsonResponse({
'success': True,
'question': question.as_dict(),
})
class LecturePace(View):
def get(self, request, pin=None):
""" Read digest of lecture pace opinions """
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
def post(self, request, pin=None):
""" Create opinion on lecture pace """
form = forms.PaceForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.lecture_activity += 1
# the 'vote' URL parameter decides whether this opinion is added or retracted
url_param = request.GET
vote = url_param.get('vote', '')
if vote:
if form.cleaned_data['pace']:
lecture.paceup += 1
else:
lecture.pacedown += 1
else:
if form.cleaned_data['pace']:
lecture.paceup -= 1
else:
lecture.pacedown -= 1
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
class LectureTimer(View):
def get(self, request, pin=None):
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
startTime = lecture.start_datetime
endTime = lecture.end_datetime
active = lecture.timer_active
return JsonResponse({
'success': True,
'startTime': startTime,
'endTime': endTime,
'active': active,
})
class StartTimer(View):
def get(self, request, pin=None):
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.start_datetime = timezone.now()
lecture.timer_active = True
lecture.rating_active = False
lecture.save()
return JsonResponse({
'success': True,
})
class StopTimer(View):
def get(self, request, pin=None):
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.timer_active = False
lecture.end_datetime = timezone.now()
lecture.rating_active = True
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.timer_active,
})
class ResetRating(View):
def get(self, request, pin=None):
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.rating_active = False
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.timer_active,
})
class Rate(View):
def post(self, request, pin=None):
form = forms.RatingForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.rating_amount += 1
old_average = lecture.rating
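# Incremental mean: with n = rating_amount after the increment above,
# new_mean = old_mean + (x - old_mean) / n, which equals
# ((n - 1) * old_mean + x) / n without re-reading every past rating.
# (Assumes the rating field holds a float so the division is not truncated.)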
lecture.rating = old_average + ((form.cleaned_data['rating'] - old_average) / lecture.rating_amount)
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
class LectureVolume(View):
def get(self, request, pin=None):
""" Read digest of lecture volume opinions """
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
def post(self, request, pin=None):
""" Create opinion on lecture volume """
form = forms.VolumeForm(request.POST)
if form.is_valid():
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist'],
},
})
lecture.lecture_activity += 1
url_param = request.GET
vote = url_param.get('vote', '')
if vote:
if form.cleaned_data['volume']:
lecture.volumeup += 1
else:
lecture.volumedown += 1
else:
if form.cleaned_data['volume']:
lecture.volumeup -= 1
else:
lecture.volumedown -= 1
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
class Courses(View):
@method_decorator(login_required)
def get(self, request):
""" Read list of latest courses for user """
course_list = models.Course.objects.filter(user=request.user).order_by('-id')
return JsonResponse({
'success': True,
'courses': [course.as_dict() for course in course_list],
})
@method_decorator(login_required)
def post(self, request):
""" Create new course for user """
form = forms.CourseForm(request.POST)
if form.is_valid():
title = form.cleaned_data['title']
if models.Course.objects.filter(title=title, user=request.user).exists():
return JsonResponse({
'success': False,
'errors': {
'message': ['Course with this title already exists'],
},
})
course = form.save(commit=False)
course.user = request.user
course.save()
return JsonResponse({
'success': True,
'course': course.as_dict()
}, status=201)
return JsonResponse({
'success': False,
'errors': form.errors,
})
class CourseDetails(View):
@method_decorator(login_required)
def get(self, request, course_id):
""" Read course details for course_id """
# TODO remember to check if user has access (owner) to course
try:
course = models.Course.objects.get(id=course_id, user=request.user)
except models.Course.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Course ID does not exist for this user'],
},
})
return JsonResponse({
'success': True,
'course': course.as_dict(),
})
@method_decorator(login_required)
def post(self, request, course_id):
""" Update course details for course_id """
# TODO remember to check if user has access (owner) to course
form = forms.CourseForm(request.POST)
if form.is_valid():
try:
course = models.Course.objects.get(id=course_id, user=request.user)
except models.Course.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Course ID does not exist for this user'],
},
})
course.title = form.cleaned_data['title']
course.save()
return JsonResponse({
'success': True,
'course': course.as_dict(),
})
return JsonResponse({
'success': False,
'errors': form.errors,
})
@method_decorator(login_required)
def delete(self, request, course_id):
""" Delete course with course_id """
# TODO remember to check if user has access (owner) to course
try:
course = models.Course.objects.get(id=course_id, user=request.user)
except models.Course.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Course ID does not exist for this user'],
},
})
course.delete()
return JsonResponse({
'success': True,
'course_id': course_id,
})
class CourseLectures(View):
@method_decorator(login_required)
def get(self, request, course_id):
""" Read list of latest lectures for course_id """
# TODO remember to check if user has access (owner) to course
lecture_list = models.Lecture.objects.filter(
course__id=course_id, course__user=request.user).order_by('-start_datetime')
return JsonResponse({
'success': True,
'lectures': [lecture.as_dict() for lecture in lecture_list],
})
@method_decorator(login_required)
def post(self, request, course_id):
""" Create new lecture for course_id """
# TODO remember to check if user has access (owner) to course
try:
course = models.Course.objects.get(id=course_id, user=request.user)
except models.Course.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Course ID does not exist for this user'],
},
})
form = forms.LectureForm(request.POST)
if form.is_valid():
lecture = form.save(commit=False)
lecture.course = course
else:
existing_lectures = models.Lecture.objects.filter(course__id=course_id, course__user=request.user)
value = [0]
for lecture in existing_lectures:
try:
value.append(int(lecture.title.split('-')[-1]))
except ValueError:
pass
lecture = models.Lecture(
course=course,
title=str(request.user).split('@')[0] + " - " + str(course.title) + " - " + str(max(value) + 1)
)
lecture.save()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
}, status=201)
class LectureStats(View):
@method_decorator(login_required)
def get(self, request, course_id):
""" Read list of latest lectures for course_id """
# TODO remember to check if user has access (owner) to course
lecture_list = models.Lecture.objects.filter(
course__id=course_id, course__user=request.user).order_by('-start_datetime')
attendees = {}
for lecture in lecture_list:
attendees[lecture.title] = lecture.attendee_counter
activity = {}
for lecture in lecture_list:
activity[lecture.title] = lecture.lecture_activity
question_count = {}
for lecture in lecture_list:
question_count[lecture.title] = models.Question.objects.filter(lecture=lecture).count()
ratings = {}
for lecture in lecture_list:
ratings[lecture.title] = lecture.rating
return JsonResponse({
'success': True,
'attendee_count': attendees,
'course_id': course_id,
'lecture_activity': activity,
'question_count': question_count,
'ratings': ratings,
})
class LectureResetVolume(View):
@method_decorator(login_required)
def get(self, request, pin):
""" Gets lecture and resets the volume value """
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist for this pin'],
},
})
lecture.reset_volume()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
class LectureResetPace(View):
@method_decorator(login_required)
def get(self, request, pin):
""" Gets lecture and resets the volume value """
try:
lecture = models.Lecture.objects.get(pin=pin)
except models.Lecture.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Lecture does not exist for this pin'],
},
})
lecture.reset_pace()
return JsonResponse({
'success': True,
'lecture': lecture.as_dict(),
})
class QuestionActive(View):
@method_decorator(login_required)
def get(self, request, pin=None, question_id=None):
""" Gets question active status """
try:
question = models.Question.objects.get(id=question_id, lecture__pin=pin)
except models.Question.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Question does not exist for this id'],
},
})
return JsonResponse({
'success': True,
'question': question.active,
})
@method_decorator(login_required)
def post(self, request, pin=None, question_id=None):
""" Changes question active status to False"""
try:
question = models.Question.objects.get(id=question_id, lecture__pin=pin, lecture__course__user=request.user)
except models.Question.DoesNotExist:
return JsonResponse({
'success': False,
'errors': {
'message': ['Question does not exist for this id'],
},
})
question.set_inactive()
return JsonResponse({
'success': True,
'question': question.active,
})
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from bs4 import BeautifulSoup
from urlparse import urlparse, urljoin
from re import search
from chime import repo_functions
class ChimeTestClient:
''' Stateful client for Chime Flask test client.
'''
def __init__(self, client, test):
''' Create a new client, with Flask test client and TestCase instances.
'''
self.client = client
self.test = test
response = self.client.get('/', follow_redirects=True)
self.test.assertTrue('Sign in' in response.data)
self.path, self.soup, self.headers = '/', BeautifulSoup(response.data), response.headers
def sign_in(self, email):
''' Sign in with a given email address.
Should be used inside an HTTMock that overrides Chime's internal
call to Persona verifier: https://verifier.login.persona.org/verify
'''
response = self.client.post('/sign-in', data={'assertion': email})
self.test.assertEqual(response.status_code, 200)
response = self.client.get('/', follow_redirects=True)
self.test.assertTrue('<!-- template name: articles-list -->' in response.data)
def reload(self):
''' Reload the current path.
'''
self.open_link(self.path)
def open_link(self, url, expected_status_code=200):
''' Open a link
'''
response = self.client.get(url)
self.test.assertEqual(response.status_code, expected_status_code)
if expected_status_code in range(300, 399):
self.follow_redirect(response, expected_status_code)
else:
self.path, self.soup, self.headers = url, BeautifulSoup(response.data), response.headers
def follow_link(self, href):
''' Follow a link after making sure it's present in the page.
'''
# Look for the link
link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag['href'] == href))
response = self.client.get(link['href'])
redirect_path = href
redirect_count = 0
while response.status_code in range(301, 399) and redirect_count < 3:
redirect_path = urlparse(response.headers['Location']).path
response = self.client.get(redirect_path)
redirect_count = redirect_count + 1
self.test.assertEqual(response.status_code, 200)
self.path, self.soup, self.headers = redirect_path, BeautifulSoup(response.data), response.headers
def follow_redirect(self, response, code):
''' Expect and follow a response HTTP redirect.
'''
self.test.assertEqual(response.status_code, code, 'Status {} should have been {}'.format(response.status_code, code))
if code in range(500, 599):
self.soup, self.headers = BeautifulSoup(response.data), response.headers
else:
redirect_path = urlparse(response.headers['Location']).path
response = self.client.get(redirect_path)
redirect_count = 0
while response.status_code in range(301, 399) and redirect_count < 3:
redirect_path = urlparse(response.headers['Location']).path
response = self.client.get(redirect_path)
redirect_count = redirect_count + 1
self.test.assertEqual(response.status_code, 200)
self.path, self.soup, self.headers = redirect_path, BeautifulSoup(response.data), response.headers
def get_branch_name(self):
''' Extract and return the branch name from the current soup.
'''
# Assumes there is an HTML comment in the format '<!-- branch: 1234567 -->'
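# The doubled braces just splice the branch-name length into the pattern;
# e.g. with a length of 7 (illustrative value) it becomes r'<!-- branch: (.{7}) -->'.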
branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), unicode(self.soup))
self.test.assertIsNotNone(branch_search)
try:
branch_name = branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
return branch_name
def start_task(self, description):
''' Look for form to start a task, submit it.
'''
# verify we're on a page that has a start activity link
start_link = self.soup.find(id='submit-start-activity')
self.test.assertIsNotNone(start_link, u'No start link on current page')
# if the modal container's not open, click the start activity link
if not self.soup.find('div', 'modal-container', 'is-open'):
self.open_link(start_link['href'])
# verify that the modal container is now open
self.test.assertIsNotNone(self.soup.find('div', 'modal-container', 'is-open'), u'No open modal container when expected')
# find the create task form and submit it
button = self.soup.find(id='submit-create-activity')
self.test.assertIsNotNone(button)
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button', 'textarea']) if i.has_attr('name')}
data['task_description'] = description
start_task_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(start_task_path, data=data)
if response.status_code == 200:
self.soup, self.headers = BeautifulSoup(response.data), response.headers
else:
self.follow_redirect(response, 303)
def delete_task(self, branch_name):
''' Look for button to delete a task, click it.
'''
button = self.soup.select('#{}-delete'.format(branch_name))[0]
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button'])}
delete_task_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(delete_task_path, data=data)
self.follow_redirect(response, 303)
def add_category(self, category_name):
''' Look for form to add a category, submit it.
'''
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = category_name
add_category_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_category_path, data=data)
# Drop down to where the subcategories are.
self.follow_redirect(response, 303)
def add_categories(self, category_list):
''' Add many categories.
'''
for category_name in category_list:
self.add_category(category_name)
def add_subcategory(self, subcategory_name):
''' Look for form to add a subcategory, submit it.
'''
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = subcategory_name
add_subcategory_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_subcategory_path, data=data)
# Drop down into the subcategory where the articles are.
self.follow_redirect(response, 303)
def add_article(self, article_name):
''' Look for form to add an article, submit it.
'''
# Create a new article.
input = self.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article'))
form = input.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button'])}
data[input['name']] = article_name
add_article_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(add_article_path, data=data)
# View the new article.
self.follow_redirect(response, 303)
def quick_activity_setup(self, description, category_name=u'', subcategory_name=u'', article_name=u''):
''' Set up an activity quickly, with topic, sub-topic, and article if requested.
'''
# Start a new task
self.start_task(description=description)
branch_name = self.get_branch_name()
# Look for an "other" link that we know about - is it a category?
self.follow_link(href='/tree/{}/edit/other/'.format(branch_name))
# Create a new category, subcategory, and article.
if category_name:
self.add_category(category_name=category_name)
if subcategory_name:
self.add_subcategory(subcategory_name=subcategory_name)
if article_name:
self.add_article(article_name=article_name)
return branch_name
def submit_edit_article_form(self, title_str, body_str):
''' Submit the edit article form and return the response for testing or passing on.
Note: This will submit the form even if it doesn't have a visible save/submit button.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'en-body'))
form = body.find_parent('form')
title = form.find(lambda tag: bool(tag.name == 'input' and tag.get('name') == 'en-title'))
self.test.assertEqual(form['method'].upper(), 'POST')
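# Collect every named control except the 'Preview' submit button, i.e. keep a
# field unless it is both type="submit" and value="Preview", so posting this
# data saves the article rather than previewing it.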
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type', u'') != 'submit' or i.get('value', u'') != 'Preview'}
data[title['name']] = title_str
data[body['name']] = body_str
edit_article_path = urlparse(urljoin(self.path, form['action'])).path
return self.client.post(edit_article_path, data=data)
def edit_article(self, title_str, body_str):
''' Look for form to edit an article, submit it.
Note: This will submit the form even if it doesn't have a visible save/submit button.
'''
response = self.submit_edit_article_form(title_str, body_str)
# View the updated article.
self.follow_redirect(response, 303)
def edit_article_and_fail(self, title_str, body_str, expected_status_code=400):
''' Look for form to edit an article we know to be published, submit it, and assert that the submission fails.
'''
response = self.submit_edit_article_form(title_str, body_str)
# Assert that the submission failed
self.test.assertTrue(response.status_code in range(expected_status_code, expected_status_code + 99))
def preview_article(self, title_str, body_str):
''' Look for form to edit an article, preview it.
'''
body = self.soup.find(lambda tag: bool(tag.name == 'textarea' and tag.get('name') == 'en-body'))
form = body.find_parent('form')
title = form.find(lambda tag: bool(tag.name == 'input' and tag.get('name') == 'en-title'))
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type') != 'submit' or i.get('value') == 'Preview'}
data[title['name']] = title_str
data[body['name']] = body_str
edit_article_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(edit_article_path, data=data)
# View the updated article.
self.follow_redirect(response, 303)
def follow_modify_category_link(self, title_str):
''' Find the (sub-)category edit button in the last soup and follow it.
'''
mod_link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == title_str))
mod_li = mod_link.find_parent('li')
mod_span = mod_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class')))
mod_link = mod_span.find_parent('a')
self.follow_link(mod_link['href'])
def submit_edit_category_form(self, title_str, description_str):
''' Submit the edit category form and return the response for testing or passing on.
Note: This will submit the form even if it doesn't have a visible save/submit button.
'''
description = self.soup.find('textarea', {'name': 'en-description'})
form = description.find_parent('form')
title = form.find('input', {'name': 'en-title'})
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type', u'') != 'submit' or i.get('value', u'') != 'delete_category'}
data[title['name']] = title_str
data[description['name']] = description_str
edit_article_path = urlparse(urljoin(self.path, form['action'])).path
return self.client.post(edit_article_path, data=data)
def edit_category(self, title_str, description_str):
''' Look for form to edit a category's details, submit it.
Note: This will submit the form even if it doesn't have a visible save/submit button.
'''
response = self.submit_edit_category_form(title_str, description_str)
# View the updated article.
self.follow_redirect(response, 303)
def delete_category(self):
''' Look for the delete button, submit it.
'''
description = self.soup.find('textarea', {'name': 'en-description'})
form = description.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('type', u'') != 'submit' or i.get('value', u'') != 'save_category'}
delete_category_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(delete_category_path, data=data)
self.follow_redirect(response, 303)
def delete_article(self, title_str):
''' Look for the article delete button, submit it
'''
del_link = self.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == title_str))
del_li = del_link.find_parent('li')
del_span = del_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class')))
del_form = del_span.find_parent('form')
self.test.assertEqual(del_form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in del_form.find_all(['input', 'button', 'textarea'])}
delete_path = urlparse(urljoin(self.path, del_form['action'])).path
response = self.client.post(delete_path, data=data)
self.follow_redirect(response, 303)
def request_feedback(self, comment_text=u'', task_description=None):
''' Look for form to request feedback, submit it.
'''
form = self.soup.find('form', {'data-test-id': 'request-feedback-form'})
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button', 'textarea'])}
data['comment_text'] = comment_text
if task_description is not None:
data['task_description'] = task_description
save_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(save_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def rename_activity(self, task_description=None):
''' Look for form to rename an activity, submit it.
'''
form = self.soup.find('form', {'data-test-id': 'rename-activity-form'})
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button', 'textarea'])}
if task_description is not None:
data['task_description'] = task_description
save_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(save_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def leave_feedback(self, comment_text=u''):
''' Look for form to leave feedback, submit it.
'''
form = self.soup.find('form', {'data-test-id': 'leave-comment-form'})
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'') for i in form.find_all(['input', 'button', 'textarea'])}
data['comment_text'] = comment_text
save_feedback_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(save_feedback_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def approve_activity(self):
''' Look for form to approve activity, submit it.
'''
button = self.soup.find(lambda tag: bool(tag.name == 'button' and tag.get('value') == 'Endorse Edits'))
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Leave a Comment'}
approve_activity_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(approve_activity_path, data=data)
# View the saved feedback.
self.follow_redirect(response, 303)
def publish_activity(self, expected_status_code=303):
''' Look for form to publish activity, submit it.
'''
button = self.soup.find(lambda tag: bool(tag.name == 'button' and tag.get('value') == 'Publish'))
form = button.find_parent('form')
self.test.assertEqual(form['method'].upper(), 'POST')
data = {i['name']: i.get('value', u'')
for i in form.find_all(['input', 'button', 'textarea'])
if i.get('value') != 'Leave a Comment'}
publish_activity_path = urlparse(urljoin(self.path, form['action'])).path
response = self.client.post(publish_activity_path, data=data)
# View the published activity.
self.follow_redirect(response, expected_status_code)
|
|
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.query import F
from django.http import Http404
from django_filters import ModelChoiceFilter
from django_filters.rest_framework import CharFilter
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import FilterSet
from rest_framework import filters
from rest_framework import viewsets
from rest_framework.response import Response
from .models import AttemptLog
from .models import ContentSessionLog
from .models import ContentSummaryLog
from .models import ExamAttemptLog
from .models import ExamLog
from .models import MasteryLog
from .models import UserSessionLog
from .permissions import ExamActivePermissions
from .serializers import AttemptLogSerializer
from .serializers import ContentSessionLogSerializer
from .serializers import ContentSummaryLogSerializer
from .serializers import ExamAttemptLogSerializer
from .serializers import ExamLogSerializer
from .serializers import MasteryLogSerializer
from .serializers import TotalContentProgressSerializer
from .serializers import UserSessionLogSerializer
from kolibri.core.auth.api import KolibriAuthPermissions
from kolibri.core.auth.api import KolibriAuthPermissionsFilter
from kolibri.core.auth.filters import HierarchyRelationsFilter
from kolibri.core.auth.models import Classroom
from kolibri.core.auth.models import Collection
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import LearnerGroup
from kolibri.core.content.api import OptionalPageNumberPagination
from kolibri.core.exams.models import Exam
class BaseLogFilter(FilterSet):
facility = ModelChoiceFilter(method="filter_facility", queryset=Facility.objects.all())
classroom = ModelChoiceFilter(method="filter_classroom", queryset=Classroom.objects.all())
learner_group = ModelChoiceFilter(method="filter_learner_group", queryset=LearnerGroup.objects.all())
# Only a superuser can filter by facilities
def filter_facility(self, queryset, name, value):
return queryset.filter(user__facility=value)
def filter_classroom(self, queryset, name, value):
return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
ancestor_collection=value,
target_user=F("user"),
)
def filter_learner_group(self, queryset, name, value):
return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
ancestor_collection=value,
target_user=F("user"),
)
class LoggerViewSet(viewsets.ModelViewSet):
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
model = self.queryset.model
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
instance = model.objects.get(id=self.kwargs[lookup_url_kwarg])
self.check_object_permissions(request, instance)
except (ValueError, ObjectDoesNotExist):
raise Http404
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
default_response = dict(request.data)
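# Echo the submitted data back to the client, then overwrite any computed
# (SerializerMethodField) values below with freshly recomputed results so the
# caller sees up-to-date derived fields without issuing a second request.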
# First look if the computed fields to be updated are listed:
updating_fields = getattr(serializer.root, 'update_fields', None)
# If not, fetch all the fields that are computed methods:
if updating_fields is None:
updating_fields = [field for field in serializer.fields if getattr(serializer.fields[field], 'method_name', None)]
for field in updating_fields:
method_name = getattr(serializer.fields[field], 'method_name', None)
if method_name:
method = getattr(serializer.root, method_name)
default_response[field] = method(instance)
return Response(default_response)
class ContentSessionLogFilter(BaseLogFilter):
class Meta:
model = ContentSessionLog
fields = ['user_id', 'content_id']
class ContentSessionLogViewSet(LoggerViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
queryset = ContentSessionLog.objects.all()
serializer_class = ContentSessionLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ContentSessionLogFilter
class ContentSummaryLogFilter(BaseLogFilter):
class Meta:
model = ContentSummaryLog
fields = ['user_id', 'content_id']
class ContentSummaryLogViewSet(LoggerViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
queryset = ContentSummaryLog.objects.all()
serializer_class = ContentSummaryLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ContentSummaryLogFilter
class TotalContentProgressViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter,)
queryset = FacilityUser.objects.all()
serializer_class = TotalContentProgressSerializer
class UserSessionLogFilter(BaseLogFilter):
class Meta:
model = UserSessionLog
fields = ['user_id']
class UserSessionLogViewSet(LoggerViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
queryset = UserSessionLog.objects.all()
serializer_class = UserSessionLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = UserSessionLogFilter
class MasteryFilter(FilterSet):
class Meta:
model = MasteryLog
fields = ['summarylog']
class MasteryLogViewSet(LoggerViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
queryset = MasteryLog.objects.all()
serializer_class = MasteryLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = MasteryFilter
class AttemptFilter(FilterSet):
content = CharFilter(method="filter_content")
def filter_content(self, queryset, name, value):
return queryset.filter(masterylog__summarylog__content_id=value)
class Meta:
model = AttemptLog
fields = ['masterylog', 'complete', 'user', 'content']
class AttemptLogViewSet(LoggerViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend, filters.OrderingFilter)
queryset = AttemptLog.objects.all()
serializer_class = AttemptLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = AttemptFilter
ordering_fields = ('end_timestamp',)
ordering = ('end_timestamp',)
class ExamAttemptFilter(FilterSet):
exam = ModelChoiceFilter(method="filter_exam", queryset=Exam.objects.all())
user = ModelChoiceFilter(method="filter_user", queryset=FacilityUser.objects.all())
def filter_exam(self, queryset, name, value):
return queryset.filter(examlog__exam=value)
def filter_user(self, queryset, name, value):
return queryset.filter(examlog__user=value)
class Meta:
model = ExamAttemptLog
fields = ['examlog', 'exam', 'user']
class ExamAttemptLogViewSet(LoggerViewSet):
permission_classes = (ExamActivePermissions, KolibriAuthPermissions, )
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend, filters.OrderingFilter)
queryset = ExamAttemptLog.objects.all()
serializer_class = ExamAttemptLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ExamAttemptFilter
class ExamLogFilter(BaseLogFilter):
collection = ModelChoiceFilter(method="filter_collection", queryset=Collection.objects.all())
def filter_collection(self, queryset, name, collection):
return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
target_user=F('user'),
ancestor_collection=collection,
)
class Meta:
model = ExamLog
fields = ['user', 'exam']
class ExamLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
queryset = ExamLog.objects.all()
serializer_class = ExamLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ExamLogFilter
|
|
from AppKit import *
import re
from lib.tools.defaults import getDefault
from lib.scripting.PyDETextView import PyDETextView, Output
from lib.scripting.scriptTools import ScriptRunner
from lib.scripting.scriptingWindow import PyTextEditor
from lib.scripting.cocoaDrawingTools import DrawingTools
import warnings
epsPasteBoardType = "CorePasteboardFlavorType 0x41494342"
variableRE = r".*^%s\s?=\s?([0-9]+)\s?$"
height_RE = re.compile(variableRE % "HEIGHT", re.MULTILINE + re.DOTALL)
width_RE = re.compile(variableRE % "WIDTH", re.MULTILINE + re.DOTALL)
size_RE = re.compile(r".*^size\s?\(\s?([0-9]+),\s?([0-9]+)\s?\)\s?$",
re.MULTILINE + re.DOTALL)
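# The patterns above pull the canvas size out of the script source: lines such
# as "HEIGHT = 400" and "WIDTH = 600" (or "size(600, 400)" for size_RE) are
# matched, and DrawView.setCode() below uses the captured numbers to size the view.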
class TinyDrawBotDrawingTools(DrawingTools):
"""
    Subclass of DrawingTools so that code written for DrawBot will work.
"""
def __init__(self):
super(TinyDrawBotDrawingTools, self).__init__()
self._savePDFPath = None
def saveImage(self, path):
self._savePDFPath = path
saveimage = saveImage
class DrawView(NSView):
def __new__(cls, *arg, **kwargs):
self = cls.alloc().init()
return self
def __init__(self, errorView):
self.setFrame_(NSMakeRect(0, 0, 1000, 1000))
self._errorView = errorView
self._code = ""
self._runRaw = False
self._pdfImage = None
self._startDrag = False
self._drawingTools = TinyDrawBotDrawingTools()
def getPath(self):
window = self.window()
if window is None:
return None
document = window.document()
if document is None:
return None
url = document.fileURL()
if url is None:
return None
return url.path()
def setCode(self, code, runRaw=False):
height, width = self.frame()[1]
heightMath = height_RE.match(code)
if heightMath:
height = int(heightMath.group(1))
widthMath = width_RE.match(code)
if widthMath:
width = int(widthMath.group(1))
code = "WIDTH = %s\nHEIGHT = %s\n" %(width, height) + code
self.setFrame_(NSMakeRect(0, 0, width, height))
self._code = code
self._runRaw = runRaw
self._pdfImage = None
self.createPDFdata()
self.savePDF_(self._drawingTools._savePDFPath)
def runCode(self):
self._errorView.set("")
if not self._code:
return
self._drawingTools._reset()
if getDefault("PyDEClearOutput", True):
self._errorView.clear()
self.stdout = Output(self._errorView)
self.stderr = Output(self._errorView, True)
path = self.getPath()
namespace = dict()
for name in self._drawingTools.__all__:
namespace[name] = getattr(self._drawingTools, name)
ScriptRunner(text=self._code, path=path, stdout=self.stdout, stderr=self.stderr, namespace=namespace)
def createPDFdata(self):
self._pdfImage = NSPDFImageRep.imageRepWithData_(self._pdfData)
def refresh(self):
self.setNeedsDisplay_(True)
def drawRect_(self, rect):
if self._runRaw:
self.runCode()
return
if self._pdfImage is None:
self.runCode()
else:
self._pdfImage.drawAtPoint_((0, 0))
### drag pdf data out :)
def mouseDown_(self, event):
if self._pdfImage is None:
return
self._startDrag = True
def mouseDragged_(self, event):
if self._pdfImage is None:
return
if not self._startDrag:
return
        self._startDrag = False
pboard = NSPasteboard.pasteboardWithName_(NSDragPboard)
pboard.declareTypes_owner_(["com.adobe.pdf", NSPDFPboardType, NSPostScriptPboardType, NSTIFFPboardType, epsPasteBoardType], self)
w, h = self._pdfImage.size()
srcRect = ((0, 0), (w, h))
if w > 400 or h > 400:
if w > h:
scale = 400.0 / w
else:
scale = 400.0 / h
dstRect = ((0, 0), (scale * w, scale * h))
x, y = self.convertPoint_fromView_(event.locationInWindow(), None)
offset = x * (1 - scale), y * (1 - scale)
else:
dstRect = srcRect
offset = (0, 0)
drawImage = NSImage.alloc().initWithSize_(dstRect[1])
drawImage.lockFocus()
self._pdfImage.drawInRect_(dstRect)
drawImage.unlockFocus()
self.dragImage_at_offset_event_pasteboard_source_slideBack_(
drawImage, offset, (0, 0), event, pboard, self, True)
def pasteboard_provideDataForType_(self, pboard, _type):
if _type == NSPDFPboardType or _type == "com.adobe.pdf":
pboard.setData_forType_(self._pdfData, NSPDFPboardType)
elif _type == NSPostScriptPboardType:
pboard.setData_forType_(self._epsData, NSPostScriptPboardType)
elif _type == NSTIFFPboardType:
pboard.setData_forType_(self._tiffData, NSTIFFPboardType)
elif _type == epsPasteBoardType:
pboard.setData_forType_(self._epsData, epsPasteBoardType)
def _get_pdfData(self):
return self.dataWithPDFInsideRect_(((0, 0), self.frame()[1]))
_pdfData = property(_get_pdfData)
def _get_epsData(self):
return self.dataWithEPSInsideRect_(((0, 0), self.frame()[1]))
_epsData = property(_get_epsData)
def _get_tiffData(self):
self._pdfImage.size()
im = NSImage.alloc().initWithSize_(self._pdfImage.size())
im.lockFocus()
self._pdfImage.drawAtPoint_((0, 0))
im.unlockFocus()
return im.TIFFRepresentation()
_tiffData = property(_get_tiffData)
def savePDF_(self, path):
if path is not None:
self._pdfData.writeToFile_atomically_(path , False)
class TinyDrawBotPyDETextView(PyDETextView):
def setDrawView_(self, view):
self._drawView = view
def runPython_(self, sender):
if hasattr(self, "_drawView"):
self._drawView.setCode(self.string())
class TinyDrawBotTextEditor(PyTextEditor):
nsTextViewClass = TinyDrawBotPyDETextView
def setDrawView(self, view):
self.getNSTextView().setDrawView_(view)
|
|
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.dummy import DummyRegressor
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_no_warnings
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
from sklearn import datasets
from sklearn.compose import TransformedTargetRegressor
friedman = datasets.make_friedman1(random_state=0)
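# make_friedman1 generates 100 samples by default; the (100, 1) shape
# assertions further down rely on that default.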
def test_transform_target_regressor_error():
X, y = friedman
# provide a transformer and functions at the same time
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=StandardScaler(),
func=np.exp, inverse_func=np.log)
with pytest.raises(ValueError,
match="'transformer' and functions"
" 'func'/'inverse_func' cannot both be set."):
regr.fit(X, y)
# fit with sample_weight with a regressor which does not support it
sample_weight = np.ones((y.shape[0],))
regr = TransformedTargetRegressor(regressor=OrthogonalMatchingPursuit(),
transformer=StandardScaler())
with pytest.raises(TypeError, match=r"fit\(\) got an unexpected "
"keyword argument 'sample_weight'"):
regr.fit(X, y, sample_weight=sample_weight)
# func is given but inverse_func is not
regr = TransformedTargetRegressor(func=np.exp)
with pytest.raises(ValueError, match="When 'func' is provided, "
"'inverse_func' must also be provided"):
regr.fit(X, y)
def test_transform_target_regressor_invertible():
X, y = friedman
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.sqrt, inverse_func=np.log,
check_inverse=True)
with pytest.warns(UserWarning, match="The provided functions or"
" transformer are not strictly inverse of each other."):
regr.fit(X, y)
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.sqrt, inverse_func=np.log)
regr.set_params(check_inverse=False)
assert_no_warnings(regr.fit, X, y)
def _check_standard_scaled(y, y_pred):
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
assert_allclose((y - y_mean) / y_std, y_pred)
def _check_shifted_by_one(y, y_pred):
assert_allclose(y + 1, y_pred)
def test_transform_target_regressor_functions():
X, y = friedman
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.log, inverse_func=np.exp)
y_pred = regr.fit(X, y).predict(X)
# check the transformer output
y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
assert_allclose(np.log(y), y_tran)
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran.reshape(-1, 1)).squeeze())
assert y.shape == y_pred.shape
assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
# check the regressor output
lr = LinearRegression().fit(X, regr.func(y))
assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
def test_transform_target_regressor_functions_multioutput():
X = friedman[0]
y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.log, inverse_func=np.exp)
y_pred = regr.fit(X, y).predict(X)
# check the transformer output
y_tran = regr.transformer_.transform(y)
assert_allclose(np.log(y), y_tran)
assert_allclose(y, regr.transformer_.inverse_transform(y_tran))
assert y.shape == y_pred.shape
assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
# check the regressor output
lr = LinearRegression().fit(X, regr.func(y))
assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
@pytest.mark.parametrize("X,y", [friedman,
(friedman[0],
np.vstack((friedman[1],
friedman[1] ** 2 + 1)).T)])
def test_transform_target_regressor_1d_transformer(X, y):
    # All transformers in scikit-learn expect 2D data. FunctionTransformer with
    # validate=False lifts this constraint without checking that the input is a
    # 2D vector. We check the consistency of the data shape using a 1D and a
    # 2D y array.
transformer = FunctionTransformer(func=lambda x: x + 1,
inverse_func=lambda x: x - 1)
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
y_tran = regr.transformer_.transform(y)
_check_shifted_by_one(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
@pytest.mark.parametrize("X,y", [friedman,
(friedman[0],
np.vstack((friedman[1],
friedman[1] ** 2 + 1)).T)])
def test_transform_target_regressor_2d_transformer(X, y):
# Check consistency with transformer accepting only 2D array and a 1D/2D y
# array.
transformer = StandardScaler()
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
if y.ndim == 1: # create a 2D array and squeeze results
y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
else:
y_tran = regr.transformer_.transform(y)
_check_standard_scaled(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
if y.ndim == 1: # create a 2D array and squeeze results
lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
else:
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_2d_transformer_multioutput():
# Check consistency with transformer accepting only 2D array and a 2D y
# array.
X = friedman[0]
y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
transformer = StandardScaler()
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
y_tran = regr.transformer_.transform(y)
_check_standard_scaled(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_multi_to_single():
X = friedman[0]
y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
def func(y):
out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
return out[:, np.newaxis]
def inverse_func(y):
return y
tt = TransformedTargetRegressor(func=func, inverse_func=inverse_func,
check_inverse=False)
tt.fit(X, y)
y_pred_2d_func = tt.predict(X)
assert y_pred_2d_func.shape == (100, 1)
    # force the function to only return a 1D array
def func(y):
return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
tt = TransformedTargetRegressor(func=func, inverse_func=inverse_func,
check_inverse=False)
tt.fit(X, y)
y_pred_1d_func = tt.predict(X)
assert y_pred_1d_func.shape == (100, 1)
assert_allclose(y_pred_1d_func, y_pred_2d_func)
class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
assert isinstance(X, np.ndarray)
return self
def transform(self, X):
assert isinstance(X, np.ndarray)
return X
def inverse_transform(self, X):
assert isinstance(X, np.ndarray)
return X
class DummyCheckerListRegressor(DummyRegressor):
def fit(self, X, y, sample_weight=None):
assert isinstance(X, list)
return super().fit(X, y, sample_weight)
def predict(self, X):
assert isinstance(X, list)
return super().predict(X)
def test_transform_target_regressor_ensure_y_array():
# check that the target ``y`` passed to the transformer will always be a
# numpy array. Similarly, if ``X`` is passed as a list, we check that the
    # predictor receives it as is.
X, y = friedman
tt = TransformedTargetRegressor(transformer=DummyCheckerArrayTransformer(),
regressor=DummyCheckerListRegressor(),
check_inverse=False)
tt.fit(X.tolist(), y.tolist())
tt.predict(X.tolist())
with pytest.raises(AssertionError):
tt.fit(X, y.tolist())
with pytest.raises(AssertionError):
tt.predict(X)
class DummyTransformer(TransformerMixin, BaseEstimator):
"""Dummy transformer which count how many time fit was called."""
def __init__(self, fit_counter=0):
self.fit_counter = fit_counter
def fit(self, X, y=None):
self.fit_counter += 1
return self
def transform(self, X):
return X
def inverse_transform(self, X):
return X
@pytest.mark.parametrize("check_inverse", [False, True])
def test_transform_target_regressor_count_fit(check_inverse):
# regression test for gh-issue #11618
# check that we only call a single time fit for the transformer
X, y = friedman
ttr = TransformedTargetRegressor(
transformer=DummyTransformer(), check_inverse=check_inverse
)
ttr.fit(X, y)
assert ttr.transformer_.fit_counter == 1
class DummyRegressorWithExtraFitParams(DummyRegressor):
def fit(self, X, y, sample_weight=None, check_input=True):
        # In the test below we force this to False and make sure it is
        # actually passed through to the regressor
assert not check_input
return super().fit(X, y, sample_weight)
def test_transform_target_regressor_pass_fit_parameters():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(),
transformer=DummyTransformer()
)
regr.fit(X, y, check_input=False)
assert regr.transformer_.fit_counter == 1
def test_transform_target_regressor_route_pipeline():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(),
transformer=DummyTransformer()
)
estimators = [
('normalize', StandardScaler()), ('est', regr)
]
pip = Pipeline(estimators)
pip.fit(X, y, **{'est__check_input': False})
assert regr.transformer_.fit_counter == 1
|
|
# -*- encoding: utf-8 -*-
# CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1])
# These can be computed for regression scores only after running
# normalize_array
from __future__ import print_function
import numpy as np
import scipy as sp
import scipy.stats
from autosklearn.constants import MULTICLASS_CLASSIFICATION, \
BINARY_CLASSIFICATION, METRIC_TO_STRING, MULTILABEL_CLASSIFICATION
from autosklearn.metrics.util import log_loss, prior_log_loss, \
binarize_predictions, normalize_array, create_multiclass_solution
def calculate_score(metric, solution, prediction, task):
if solution.shape[0] != prediction.shape[0]:
raise ValueError('Solution and prediction have different number of '
'samples: %d and %d' % (solution.shape[0],
prediction.shape[0]))
metric = METRIC_TO_STRING[metric]
return globals()[metric](solution, prediction, task)
def acc_metric(solution, prediction, task=BINARY_CLASSIFICATION):
"""
    Compute the normalized accuracy.
    For the binary and multi-label cases, the raw accuracy is
        acc = (tp + tn) / (tp + tn + fp + fn)
    It is then normalized so that 1 is perfect and 0 means random guessing.
:param solution:
:param prediction:
:param task:
:return:
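    Illustrative example (assuming binarize_predictions uses the usual 0.5
    threshold):
        solution   = np.array([1, 0, 1, 1])
        prediction = np.array([[0.1, 0.9], [0.8, 0.2], [0.2, 0.8], [0.6, 0.4]])
        # raw accuracy is 0.75 and the base accuracy is 0.5, so the
        # returned score is (0.75 - 0.5) / (1 - 0.5) == 0.5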
"""
if task == BINARY_CLASSIFICATION:
if len(solution.shape) == 1:
# Solution won't be touched - no copy
solution = solution.reshape((-1, 1))
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = solution.reshape((-1, 1))
else:
raise ValueError('Solution.shape %s' % solution.shape)
if len(prediction.shape) == 2:
if prediction.shape[1] > 2:
raise ValueError('A prediction array with probability values '
'for %d classes is not a binary '
'classification problem' % prediction.shape[1])
# Prediction will be copied into a new binary array - no copy
prediction = prediction[:, 1].reshape((-1, 1))
else:
raise ValueError('Invalid prediction shape %s' % prediction.shape)
elif task == MULTICLASS_CLASSIFICATION:
if len(solution.shape) == 1:
solution = create_multiclass_solution(solution, prediction)
        elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = create_multiclass_solution(solution.reshape((-1, 1)),
prediction)
else:
raise ValueError('Solution.shape %s' % solution.shape)
elif task == MULTILABEL_CLASSIFICATION:
pass
else:
raise NotImplementedError('acc_metric does not support task type %s'
% task)
bin_predictions = binarize_predictions(prediction, task)
tn = np.sum(np.multiply((1 - solution), (1 - bin_predictions)), axis=0,
dtype=float)
fn = np.sum(np.multiply(solution, (1 - bin_predictions)), axis=0,
dtype=float)
tp = np.sum(np.multiply(solution, bin_predictions), axis=0,
dtype=float)
fp = np.sum(np.multiply((1 - solution), bin_predictions), axis=0,
dtype=float)
# Bounding to avoid division by 0, 1e-7 because of float32
    eps = float(1e-7)
tp = np.sum(tp)
fp = np.sum(fp)
tn = np.sum(tn)
fn = np.sum(fn)
if task in (BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION):
accuracy = (np.sum(tp) + np.sum(tn)) / (
np.sum(tp) + np.sum(fp) + np.sum(tn) + np.sum(fn)
)
elif task == MULTICLASS_CLASSIFICATION:
accuracy = np.sum(tp) / (np.sum(tp) + np.sum(fp))
if task in (BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION):
base_accuracy = 0.5 # random predictions for binary case
elif task == MULTICLASS_CLASSIFICATION:
label_num = solution.shape[1]
base_accuracy = 1. / label_num
# Normalize: 0 for random, 1 for perfect
score = (accuracy - base_accuracy) / sp.maximum(eps, (1 - base_accuracy))
return score
def bac_metric(solution, prediction, task=BINARY_CLASSIFICATION):
"""
Compute the normalized balanced accuracy.
The binarization and
the normalization differ for the multi-label and multi-class case.
:param solution:
:param prediction:
:param task:
:return:
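    Sketch of the binary-case computation performed below:
        tpr   = tp / (tp + fn)    # sensitivity
        tnr   = tn / (tn + fp)    # specificity
        bac   = 0.5 * (tpr + tnr)
        score = (bac - 0.5) / (1 - 0.5)
    so random predictions score about 0 and perfect predictions score 1.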
"""
if task == BINARY_CLASSIFICATION:
if len(solution.shape) == 1:
# Solution won't be touched - no copy
solution = solution.reshape((-1, 1))
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = solution.reshape((-1, 1))
else:
raise ValueError('Solution.shape %s' % solution.shape)
if len(prediction.shape) == 2:
if prediction.shape[1] > 2:
raise ValueError('A prediction array with probability values '
'for %d classes is not a binary '
'classification problem' % prediction.shape[1])
# Prediction will be copied into a new binary array - no copy
prediction = prediction[:, 1].reshape((-1, 1))
else:
raise ValueError('Invalid prediction shape %s' % prediction.shape)
elif task == MULTICLASS_CLASSIFICATION:
if len(solution.shape) == 1:
solution = create_multiclass_solution(solution, prediction)
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = create_multiclass_solution(solution.reshape((-1, 1)),
prediction)
else:
raise ValueError('Solution.shape %s' % solution.shape)
elif task == MULTILABEL_CLASSIFICATION:
pass
else:
raise NotImplementedError('bac_metric does not support task type %s'
% task)
bin_prediction = binarize_predictions(prediction, task)
fn = np.sum(np.multiply(solution, (1 - bin_prediction)), axis=0,
dtype=float)
tp = np.sum(np.multiply(solution, bin_prediction), axis=0, dtype=float)
# Bounding to avoid division by 0
eps = 1e-15
tp = sp.maximum(eps, tp)
pos_num = sp.maximum(eps, tp + fn)
tpr = tp / pos_num # true positive rate (sensitivity)
if task in (BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION):
tn = np.sum(np.multiply((1 - solution), (1 - bin_prediction)),
axis=0, dtype=float)
fp = np.sum(np.multiply((1 - solution), bin_prediction), axis=0,
dtype=float)
tn = sp.maximum(eps, tn)
neg_num = sp.maximum(eps, tn + fp)
tnr = tn / neg_num # true negative rate (specificity)
bac = 0.5 * (tpr + tnr)
base_bac = 0.5 # random predictions for binary case
elif task == MULTICLASS_CLASSIFICATION:
label_num = solution.shape[1]
bac = tpr
base_bac = 1. / label_num # random predictions for multiclass case
bac = np.mean(bac) # average over all classes
# Normalize: 0 for random, 1 for perfect
score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
return score
def pac_metric(solution, prediction, task=BINARY_CLASSIFICATION):
"""
Probabilistic Accuracy based on log_loss metric.
We assume the solution is in {0, 1} and prediction in [0, 1].
Otherwise, run normalize_array.
:param solution:
:param prediction:
:param task:
:return:
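    Sketch of the normalization performed below:
        pac      = mean(exp(-log_loss(solution, prediction, task)))
        base_pac = mean(exp(-prior_log_loss(frac_pos, task)))
        score    = (pac - base_pac) / (1 - base_pac)
    so predicting the class priors scores about 0 and a perfect probabilistic
    prediction scores 1.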
"""
if task == BINARY_CLASSIFICATION:
if len(solution.shape) == 1:
# Solution won't be touched - no copy
solution = solution.reshape((-1, 1))
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
                solution = solution.reshape((-1, 1))
else:
raise ValueError('Solution.shape %s' % solution.shape)
solution = solution.copy()
if len(prediction.shape) == 2:
if prediction.shape[1] > 2:
raise ValueError('A prediction array with probability values '
'for %d classes is not a binary '
'classification problem' % prediction.shape[1])
# Prediction will be copied into a new binary array - no copy
prediction = prediction[:, 1].reshape((-1, 1))
else:
raise ValueError('Invalid prediction shape %s' % prediction.shape)
elif task == MULTICLASS_CLASSIFICATION:
if len(solution.shape) == 1:
solution = create_multiclass_solution(solution, prediction)
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = create_multiclass_solution(solution.reshape((-1, 1)),
prediction)
else:
raise ValueError('Solution.shape %s' % solution.shape)
elif task == MULTILABEL_CLASSIFICATION:
solution = solution.copy()
else:
        raise NotImplementedError('pac_metric does not support task type %s'
% task)
solution, prediction = normalize_array(solution, prediction.copy())
[sample_num, label_num] = solution.shape
if label_num == 1:
task = BINARY_CLASSIFICATION
eps = 1e-7
# Compute the base log loss (using the prior probabilities)
pos_num = 1. * np.sum(solution, axis=0, dtype=float) # float conversion!
frac_pos = pos_num / sample_num # prior proba of positive class
the_base_log_loss = prior_log_loss(frac_pos, task)
the_log_loss = log_loss(solution, prediction, task)
# Exponentiate to turn into an accuracy-like score.
# In the multi-label case, we need to average AFTER taking the exp
    # because exp is a non-linear operation
pac = np.mean(np.exp(-the_log_loss))
base_pac = np.mean(np.exp(-the_base_log_loss))
# Normalize: 0 for random, 1 for perfect
score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
return score
def f1_metric(solution, prediction, task=BINARY_CLASSIFICATION):
"""
Compute the normalized f1 measure.
The binarization differs
for the multi-label and multi-class case.
A non-weighted average over classes is taken.
The score is normalized.
:param solution:
:param prediction:
:param task:
:return:
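    Sketch of the per-class computation performed below:
        tpr   = tp / (tp + fn)                    # recall
        ppv   = tp / (tp + fp)                    # precision
        f1    = tpr * ppv / (0.5 * (tpr + ppv))   # harmonic mean
        score = (mean(f1) - base_f1) / (1 - base_f1)
    where base_f1 is 0.5 for the binary/multi-label cases and 1/label_num for
    multiclass.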
"""
if task == BINARY_CLASSIFICATION:
if len(solution.shape) == 1:
# Solution won't be touched - no copy
solution = solution.reshape((-1, 1))
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = solution.reshape((-1, 1))
else:
raise ValueError('Solution.shape %s' % solution.shape)
if len(prediction.shape) == 2:
if prediction.shape[1] > 2:
raise ValueError('A prediction array with probability values '
'for %d classes is not a binary '
'classification problem' % prediction.shape[1])
# Prediction will be copied into a new binary array - no copy
prediction = prediction[:, 1].reshape((-1, 1))
else:
raise ValueError('Invalid prediction shape %s' % prediction.shape)
elif task == MULTICLASS_CLASSIFICATION:
if len(solution.shape) == 1:
solution = create_multiclass_solution(solution, prediction)
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = create_multiclass_solution(solution.reshape((-1, 1)),
prediction)
else:
raise ValueError('Solution.shape %s' % solution.shape)
elif task == MULTILABEL_CLASSIFICATION:
pass
else:
raise NotImplementedError('f1_metric does not support task type %s'
% task)
bin_prediction = binarize_predictions(prediction, task)
# Bounding to avoid division by 0
eps = 1e-15
fn = np.sum(np.multiply(solution, (1 - bin_prediction)), axis=0, dtype=float)
tp = np.sum(np.multiply(solution, bin_prediction), axis=0, dtype=float)
fp = np.sum(np.multiply((1 - solution), bin_prediction), axis=0, dtype=float)
true_pos_num = sp.maximum(eps, tp + fn)
found_pos_num = sp.maximum(eps, tp + fp)
tp = sp.maximum(eps, tp)
tpr = tp / true_pos_num # true positive rate (recall)
ppv = tp / found_pos_num # positive predictive value (precision)
arithmetic_mean = 0.5 * sp.maximum(eps, tpr + ppv)
# Harmonic mean:
f1 = tpr * ppv / arithmetic_mean
# Average over all classes
f1 = np.mean(f1)
# Normalize: 0 for random, 1 for perfect
if task in (BINARY_CLASSIFICATION, MULTILABEL_CLASSIFICATION):
# How to choose the "base_f1"?
# For the binary/multilabel classification case, one may want to predict all 1.
# In that case tpr = 1 and ppv = frac_pos. f1 = 2 * frac_pos / (1+frac_pos)
# frac_pos = mvmean(solution.ravel())
# base_f1 = 2 * frac_pos / (1+frac_pos)
# or predict random values with probability 0.5, in which case
# base_f1 = 0.5
# the first solution is better only if frac_pos > 1/3.
# The solution in which we predict according to the class prior frac_pos gives
# f1 = tpr = ppv = frac_pos, which is worse than 0.5 if frac_pos<0.5
# So, because the f1 score is used if frac_pos is small (typically <0.1)
# the best is to assume that base_f1=0.5
base_f1 = 0.5
# For the multiclass case, this is not possible (though it does not make much sense to
# use f1 for multiclass problems), so the best would be to assign values at random to get
# tpr=ppv=frac_pos, where frac_pos=1/label_num
elif task == MULTICLASS_CLASSIFICATION:
label_num = solution.shape[1]
base_f1 = 1. / label_num
score = (f1 - base_f1) / sp.maximum(eps, (1 - base_f1))
return score
def auc_metric(solution, prediction, task=BINARY_CLASSIFICATION):
"""
    Normalized Area under the ROC curve (AUC).
Return Gini index = 2*AUC-1 for binary classification problems.
    Should work for a vector of binary 0/1 (or -1/1) "solution" values and any
    discriminant values for the predictions. If solution and prediction are not
    vectors, the AUC of each column of the matrices is computed and averaged
    (with no weight).
    The same code is used for all classification problems (although it only
    handles the binary and multilabel cases properly).
:param solution:
:param prediction:
:param task:
:return:
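    Sketch of the rank-based computation performed below, per column k:
        r_    = rankdata(prediction[:, k])
        AUC_k = (sum(r_[solution[:, k] == 1]) - npos * (npos + 1) / 2) / (npos * nneg)
    and the returned value is 2 * mean(AUC_k) - 1.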
"""
if task == BINARY_CLASSIFICATION:
if len(solution.shape) == 1:
# Solution won't be touched - no copy
solution = solution.reshape((-1, 1))
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
                solution = solution.reshape((-1, 1))
else:
raise ValueError('Solution.shape %s' % solution.shape)
solution = solution.copy()
if len(prediction.shape) == 2:
if prediction.shape[1] > 2:
raise ValueError('A prediction array with probability values '
'for %d classes is not a binary '
'classification problem' % prediction.shape[1])
# Prediction will be copied into a new binary array - no copy
prediction = prediction[:, 1].reshape((-1, 1))
else:
raise ValueError('Invalid prediction shape %s' % prediction.shape)
elif task == MULTICLASS_CLASSIFICATION:
if len(solution.shape) == 1:
solution = create_multiclass_solution(solution, prediction)
elif len(solution.shape) == 2:
if solution.shape[1] > 1:
raise ValueError('Solution array must only contain one class '
'label, but contains %d' % solution.shape[1])
else:
solution = create_multiclass_solution(solution.reshape((-1, 1)),
prediction)
else:
raise ValueError('Solution.shape %s' % solution.shape)
elif task == MULTILABEL_CLASSIFICATION:
solution = solution.copy()
else:
raise NotImplementedError('auc_metric does not support task type %s'
% task)
solution, prediction = normalize_array(solution, prediction.copy())
label_num = solution.shape[1]
auc = np.empty(label_num)
for k in range(label_num):
r_ = scipy.stats.rankdata(prediction[:, k])
s_ = solution[:, k]
if sum(s_) == 0:
print(
'WARNING: no positive class example in class {}'.format(k + 1))
npos = np.sum(s_ == 1)
nneg = np.sum(s_ < 1)
auc[k] = (np.sum(r_[s_ == 1]) - npos * (npos + 1) / 2) / (nneg * npos)
auc[~np.isfinite(auc)] = 0
return 2 * np.mean(auc) - 1
|
|
"""Reliable low-latency audio playback and recording.
http://python-rtmixer.readthedocs.io/
"""
__version__ = '0.1.0'
import sounddevice as _sd
from pa_ringbuffer import init as _init_ringbuffer
from _rtmixer import ffi as _ffi, lib as _lib
RingBuffer = _init_ringbuffer(_ffi, _lib)
# Get constants from C library
for _k, _v in vars(_lib).items():
if _k.isupper():
globals()[_k] = _v
class _Base(_sd._StreamBase):
"""Base class for Mixer et al."""
def __init__(self, kind, qsize=16, **kwargs):
callback = _ffi.addressof(_lib, 'callback')
self._action_q = RingBuffer(_ffi.sizeof('struct action*'), qsize)
self._result_q = RingBuffer(_ffi.sizeof('struct action*'), qsize)
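        # The two ring buffers form a lock-free round trip with the C callback:
        # _enqueue() writes action pointers into action_q, the callback is
        # expected to post finished actions back on result_q, and
        # _drain_result_q() removes them from the Python-side `actions` set.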
self._state = _ffi.new('struct state*', dict(
input_channels=0,
output_channels=0,
samplerate=0,
action_q=self._action_q._ptr,
result_q=self._result_q._ptr,
actions=_ffi.NULL,
))
_sd._StreamBase.__init__(
self, kind=kind, dtype='float32',
callback=callback, userdata=self._state, **kwargs)
self._state.samplerate = self.samplerate
self._actions = set()
self._temp_action_ptr = _ffi.new('struct action**')
@property
def actions(self):
"""The set of active "actions"."""
self._drain_result_q()
return self._actions
@property
def stats(self):
"""Get over-/underflow statistics from an *inactive* stream.
To get statistics from an :attr:`~sounddevice.Stream.active`
stream, use `fetch_and_reset_stats()`.
"""
if self.active:
raise RuntimeError('Accessing .stats on an active stream')
return _ffi.new('struct stats*', self._state.stats)
def cancel(self, action, time=0, allow_belated=True):
"""Initiate stopping a running action.
This creates another action that is sent to the callback in
order to stop the given *action*.
This function typically returns before the *action* is actually
stopped. Use `wait()` (on either one of the two actions) to
wait until it's done.
"""
cancel_action = _ffi.new('struct action*', dict(
type=CANCEL,
allow_belated=allow_belated,
requested_time=time,
action=action,
))
self._enqueue(cancel_action)
return cancel_action
def fetch_and_reset_stats(self, time=0, allow_belated=True):
"""Fetch and reset over-/underflow statistics of the stream.
"""
action = _ffi.new('struct action*', dict(
type=FETCH_AND_RESET_STATS,
allow_belated=allow_belated,
requested_time=time,
))
self._enqueue(action)
return action
def wait(self, action, sleeptime=10):
"""Wait for *action* to be finished.
Between repeatedly checking if the action is finished, this
waits for *sleeptime* milliseconds.
"""
while action in self.actions:
_sd.sleep(sleeptime)
def _check_channels(self, channels, kind):
"""Check if number of channels or mapping was given."""
assert kind in ('input', 'output')
try:
channels, mapping = len(channels), channels
except TypeError:
mapping = tuple(range(1, channels + 1))
max_channels = _sd._split(self.channels)[kind == 'output']
if max(mapping) > max_channels:
raise ValueError('Channel number too large')
if min(mapping) < 1:
raise ValueError('Channel numbers start with 1')
return channels, mapping
def _enqueue(self, action):
self._drain_result_q()
self._temp_action_ptr[0] = action
ret = self._action_q.write(self._temp_action_ptr)
if ret != 1:
raise RuntimeError('Action queue is full')
self._actions.add(action)
def _drain_result_q(self):
"""Get actions from the result queue and discard them."""
while self._result_q.readinto(self._temp_action_ptr):
try:
self._actions.remove(self._temp_action_ptr[0])
except KeyError:
assert False
class Mixer(_Base):
"""PortAudio output stream for realtime mixing.
Takes the same keyword arguments as `sounddevice.OutputStream`,
except *callback* (a callback function implemented in C is used
internally) and *dtype* (which is always ``'float32'``).
Uses default values from `sounddevice.default` (except *dtype*,
which is always ``'float32'``).
Has the same methods and attributes as `sounddevice.OutputStream`
(except :meth:`~sounddevice.Stream.write` and
:attr:`~sounddevice.Stream.write_available`), plus the following:
"""
def __init__(self, **kwargs):
_Base.__init__(self, kind='output', **kwargs)
self._state.output_channels = self.channels
def play_buffer(self, buffer, channels, start=0, allow_belated=True):
"""Send a buffer to the callback to be played back.
After that, the *buffer* must not be written to anymore.
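
        Rough usage sketch (illustrative only; *buffer* is assumed to be a
        float32 bytes-like object matching the channel count):

            mixer = Mixer(channels=2, samplerate=48000)
            mixer.start()
            action = mixer.play_buffer(buffer, channels=2)
            mixer.wait(action)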
"""
channels, mapping = self._check_channels(channels, 'output')
buffer = _ffi.from_buffer(buffer)
_, samplesize = _sd._split(self.samplesize)
action = _ffi.new('struct action*', dict(
type=PLAY_BUFFER,
allow_belated=allow_belated,
requested_time=start,
buffer=_ffi.cast('float*', buffer),
total_frames=len(buffer) // channels // samplesize,
channels=channels,
mapping=mapping,
))
self._enqueue(action)
return action
def play_ringbuffer(self, ringbuffer, channels=None, start=0,
allow_belated=True):
"""Send a `RingBuffer` to the callback to be played back.
By default, the number of channels is obtained from the ring
buffer's :attr:`~RingBuffer.elementsize`.
"""
_, samplesize = _sd._split(self.samplesize)
if channels is None:
channels = ringbuffer.elementsize // samplesize
channels, mapping = self._check_channels(channels, 'output')
if ringbuffer.elementsize != samplesize * channels:
raise ValueError('Incompatible elementsize')
action = _ffi.new('struct action*', dict(
type=PLAY_RINGBUFFER,
allow_belated=allow_belated,
requested_time=start,
ringbuffer=ringbuffer._ptr,
total_frames=ULONG_MAX,
channels=channels,
mapping=mapping,
))
self._enqueue(action)
return action
class Recorder(_Base):
"""PortAudio input stream for realtime recording.
Takes the same keyword arguments as `sounddevice.InputStream`,
except *callback* (a callback function implemented in C is used
internally) and *dtype* (which is always ``'float32'``).
Uses default values from `sounddevice.default` (except *dtype*,
which is always ``'float32'``).
Has the same methods and attributes as `Mixer`, except that
`play_buffer()` and `play_ringbuffer()` are replaced by:
"""
def __init__(self, **kwargs):
_Base.__init__(self, kind='input', **kwargs)
self._state.input_channels = self.channels
def record_buffer(self, buffer, channels, start=0, allow_belated=True):
"""Send a buffer to the callback to be recorded into.
"""
channels, mapping = self._check_channels(channels, 'input')
buffer = _ffi.from_buffer(buffer)
samplesize, _ = _sd._split(self.samplesize)
action = _ffi.new('struct action*', dict(
type=RECORD_BUFFER,
allow_belated=allow_belated,
requested_time=start,
buffer=_ffi.cast('float*', buffer),
total_frames=len(buffer) // channels // samplesize,
channels=channels,
mapping=mapping,
))
self._enqueue(action)
return action
def record_ringbuffer(self, ringbuffer, channels=None, start=0,
allow_belated=True):
"""Send a `RingBuffer` to the callback to be recorded into.
By default, the number of channels is obtained from the ring
buffer's :attr:`~RingBuffer.elementsize`.
"""
samplesize, _ = _sd._split(self.samplesize)
if channels is None:
channels = ringbuffer.elementsize // samplesize
channels, mapping = self._check_channels(channels, 'input')
if ringbuffer.elementsize != samplesize * channels:
raise ValueError('Incompatible elementsize')
action = _ffi.new('struct action*', dict(
type=RECORD_RINGBUFFER,
allow_belated=allow_belated,
requested_time=start,
ringbuffer=ringbuffer._ptr,
total_frames=ULONG_MAX,
channels=channels,
mapping=mapping,
))
self._enqueue(action)
return action
class MixerAndRecorder(Mixer, Recorder):
"""PortAudio stream for realtime mixing and recording.
Takes the same keyword arguments as `sounddevice.Stream`, except
*callback* (a callback function implemented in C is used internally)
and *dtype* (which is always ``'float32'``).
Uses default values from `sounddevice.default` (except *dtype*,
which is always ``'float32'``).
Inherits all methods and attributes from `Mixer` and `Recorder`.
"""
def __init__(self, **kwargs):
_Base.__init__(self, kind='duplex', **kwargs)
self._state.input_channels = self.channels[0]
self._state.output_channels = self.channels[1]
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import unittest
import luigi
import luigi.format
import luigi.contrib.hadoop
import luigi.contrib.hdfs
import luigi.mrrunner
import luigi.notifications
import minicluster
import mock
from luigi.mock import MockTarget
from nose.plugins.attrib import attr
luigi.notifications.DEBUG = True
luigi.contrib.hadoop.attach(minicluster)
class OutputMixin(luigi.Task):
use_hdfs = luigi.BoolParameter(default=False)
def get_output(self, fn):
if self.use_hdfs:
return luigi.contrib.hdfs.HdfsTarget('/tmp/' + fn, format=luigi.format.get_default_format() >> luigi.contrib.hdfs.PlainDir)
else:
return MockTarget(fn)
class HadoopJobTask(luigi.contrib.hadoop.JobTask, OutputMixin):
def job_runner(self):
if self.use_hdfs:
return minicluster.MiniClusterHadoopJobRunner()
else:
return luigi.contrib.hadoop.LocalJobRunner()
class Words(OutputMixin):
def output(self):
return self.get_output('words')
def run(self):
f = self.output().open('w')
f.write('kj kj lkj lkj ljoi j iljlk jlk jlk jk jkl jlk jlkj j ioj ioj kuh kjh\n')
f.write('kjsfsdfkj sdjkf kljslkj flskjdfj jkkd jjfk jk jk jk jk jk jklkjf kj lkj lkj\n')
f.close()
class WordCountJob(HadoopJobTask):
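    # Classic streaming word count: the mapper emits (word, 1) for every token
    # (and bumps a Hadoop counter per word), and the reducer sums the counts.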
def mapper(self, line):
for word in line.strip().split():
self.incr_counter('word', word, 1)
yield word, 1
    def reducer(self, word, occurrences):
        yield word, sum(occurrences)
def requires(self):
return Words(self.use_hdfs)
def output(self):
return self.get_output('wordcount')
class WordFreqJob(HadoopJobTask):
def init_local(self):
self.n = 0
for line in self.input_local().open('r'):
word, count = line.strip().split()
self.n += int(count)
def mapper(self, line):
for word in line.strip().split():
yield word, 1.0 / self.n
def combiner(self, word, occurrences):
yield word, sum(occurrences)
    def reducer(self, word, occurrences):
        yield word, sum(occurrences)
def requires_local(self):
return WordCountJob(self.use_hdfs)
def requires_hadoop(self):
return Words(self.use_hdfs)
def output(self):
return self.get_output('luigitest-2')
def extra_files(self):
fn = os.listdir('.')[0] # Just return some file, doesn't matter which
return [(fn, 'my_dir/my_file')]
def init_remote(self):
f = open('my_dir/my_file') # make sure it exists
class MapOnlyJob(HadoopJobTask):
def mapper(self, line):
for word in line.strip().split():
yield (word,)
def requires_hadoop(self):
return Words(self.use_hdfs)
def output(self):
return self.get_output('luigitest-3')
class UnicodeJob(HadoopJobTask):
def mapper(self, line):
yield u'test', 1
yield b'test', 1
    def reducer(self, word, occurrences):
        yield word, sum(occurrences)
def requires(self):
return Words(self.use_hdfs)
def output(self):
return self.get_output('luigitest-4')
class UseJsonAsDataInterchangeFormatJob(HadoopJobTask):
data_interchange_format = "json"
def mapper(self, line):
yield "json", {"data type": "json"}
def reducer(self, _, vals):
yield "", json.dumps(list(vals)[0])
def requires(self):
""" Two lines from Word.task will cause two `mapper` call. """
return Words(self.use_hdfs)
def output(self):
return self.get_output('luigitest-5')
class FailingJobException(Exception):
pass
class FailingJob(HadoopJobTask):
def init_hadoop(self):
raise FailingJobException('failure')
def output(self):
return self.get_output('failing')
class MyStreamingJob(luigi.contrib.hadoop.JobTask):
param = luigi.Parameter()
def read_wordcount_output(p):
count = {}
for line in p.open('r'):
k, v = line.strip().split()
count[k] = v
return count
class CommonTests(object):
@staticmethod
def test_run(test_case):
job = WordCountJob(use_hdfs=test_case.use_hdfs)
luigi.build([job], local_scheduler=True)
c = read_wordcount_output(job.output())
test_case.assertEqual(int(c['jk']), 6)
@staticmethod
def test_run_2(test_case):
job = WordFreqJob(use_hdfs=test_case.use_hdfs)
luigi.build([job], local_scheduler=True)
c = read_wordcount_output(job.output())
test_case.assertAlmostEquals(float(c['jk']), 6.0 / 33.0)
@staticmethod
def test_map_only(test_case):
job = MapOnlyJob(use_hdfs=test_case.use_hdfs)
luigi.build([job], local_scheduler=True)
c = []
for line in job.output().open('r'):
c.append(line.strip())
test_case.assertEqual(c[0], 'kj')
test_case.assertEqual(c[4], 'ljoi')
@staticmethod
def test_unicode_job(test_case):
job = UnicodeJob(use_hdfs=test_case.use_hdfs)
luigi.build([job], local_scheduler=True)
c = []
for line in job.output().open('r'):
c.append(line)
        # Make sure unicode('test') isn't grouped with str('test'),
        # since this is what happens when running on a cluster
test_case.assertEqual(len(c), 2)
test_case.assertEqual(c[0], "test\t2\n")
@staticmethod
def test_use_json_as_data_interchange_format_job(test_case):
        job = UseJsonAsDataInterchangeFormatJob(use_hdfs=test_case.use_hdfs)
luigi.build([job], local_scheduler=True)
c = []
for line in job.output().open('r'):
c.append(line)
test_case.assertEqual(c, ['{"data type": "json"}\n'])
@staticmethod
def test_failing_job(test_case):
job = FailingJob(use_hdfs=test_case.use_hdfs)
success = luigi.build([job], local_scheduler=True)
test_case.assertFalse(success)
class MapreduceLocalTest(unittest.TestCase):
use_hdfs = False
def run_and_check(self, args):
run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
return run_exit_status
def test_run(self):
CommonTests.test_run(self)
def test_run_2(self):
CommonTests.test_run_2(self)
def test_map_only(self):
CommonTests.test_map_only(self)
def test_unicode_job(self):
CommonTests.test_unicode_job(self)
def test_use_json_as_data_interchange_format_job(self):
CommonTests.test_use_json_as_data_interchange_format_job(self)
def test_failing_job(self):
CommonTests.test_failing_job(self)
def test_instantiate_job(self):
# See https://github.com/spotify/luigi/issues/738
MyStreamingJob('param_value')
def test_cmd_line(self):
class DummyHadoopTask(luigi.contrib.hadoop.JobTask):
param = luigi.Parameter()
def run(self):
if 'mypool' not in ''.join(self.jobconfs()):
raise ValueError("noooooo")
self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--pool', 'mypool']))
self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--hadoop-pool', 'mypool']))
def setUp(self):
MockTarget.fs.clear()
@attr('minicluster')
class MapreduceIntegrationTest(minicluster.MiniClusterTestCase):
""" Uses the Minicluster functionality to test this against Hadoop """
use_hdfs = True
def test_run(self):
CommonTests.test_run(self)
def test_run_2(self):
CommonTests.test_run_2(self)
def test_map_only(self):
CommonTests.test_map_only(self)
# TODO(erikbern): some really annoying issue with minicluster causes
# test_unicode_job to hang
def test_failing_job(self):
CommonTests.test_failing_job(self)
class CreatePackagesArchive(unittest.TestCase):
def setUp(self):
sys.path.append(os.path.join('test', 'create_packages_archive_root'))
def tearDown(self):
sys.path.remove(os.path.join('test', 'create_packages_archive_root'))
def _assert_module(self, add):
add.assert_called_once_with('test/create_packages_archive_root/module.py',
'module.py')
def _assert_package(self, add):
add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
add.assert_any_call('test/create_packages_archive_root/package/submodule.py', 'package/submodule.py')
add.assert_any_call('test/create_packages_archive_root/package/submodule_with_absolute_import.py', 'package/submodule_with_absolute_import.py')
add.assert_any_call('test/create_packages_archive_root/package/submodule_without_imports.py', 'package/submodule_without_imports.py')
add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
add.assert_any_call('test/create_packages_archive_root/package.egg-info/top_level.txt', 'package.egg-info/top_level.txt')
assert add.call_count == 7
def _assert_package_subpackage(self, add):
add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
assert add.call_count == 3
@mock.patch('tarfile.open')
def test_create_packages_archive_module(self, tar):
module = __import__("module", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([module], '/dev/null')
self._assert_module(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package(self, tar):
package = __import__("package", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package], '/dev/null')
self._assert_package(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package_submodule(self, tar):
package_submodule = __import__("package.submodule", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package_submodule], '/dev/null')
self._assert_package(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package_submodule_with_absolute_import(self, tar):
package_submodule_with_absolute_import = __import__("package.submodule_with_absolute_import", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package_submodule_with_absolute_import], '/dev/null')
self._assert_package(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package_submodule_without_imports(self, tar):
package_submodule_without_imports = __import__("package.submodule_without_imports", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package_submodule_without_imports], '/dev/null')
self._assert_package(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package_subpackage(self, tar):
package_subpackage = __import__("package.subpackage", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package_subpackage], '/dev/null')
self._assert_package_subpackage(tar.return_value.add)
@mock.patch('tarfile.open')
def test_create_packages_archive_package_subpackage_submodule(self, tar):
package_subpackage_submodule = __import__("package.subpackage.submodule", None, None, 'dummy')
luigi.contrib.hadoop.create_packages_archive([package_subpackage_submodule], '/dev/null')
self._assert_package_subpackage(tar.return_value.add)
if __name__ == '__main__':
    unittest.main()
|
|
import datetime
import json
import logging.handlers
import os
import pprint
import random
import re
import types
from configparser import RawConfigParser
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
from splunk_eventgen.lib.eventgenexceptions import FailedLoadingPlugin, PluginNotLoaded
from splunk_eventgen.lib.eventgensamples import Sample
from splunk_eventgen.lib.eventgentoken import Token
from splunk_eventgen.lib.logging_config import logger
# 4/21/14 CS Adding a defined constant whether we're running in standalone mode or not
# Standalone mode is when we know we're Splunk embedded but we want to force
# configs to be read from a file instead of via Splunk's REST endpoint.
# This is used in the OIDemo and others for embedding the eventgen into an
# application. We want to ensure we're reading from files. It is the app's
# responsibility to ensure eventgen.conf settings are not exported to where
# SA-Eventgen can see them.
#
# The reason this is a constant instead of a config setting is we must know
# this before we read any config and we cannot use a command line parameter
# because we interpret all those as config overrides.
STANDALONE = False
# 5/10/12 CS Some people consider Singleton to be lazy. Dunno, I like it for convenience.
# My general thought on that sort of stuff is if you don't like it, reimplement it. I'll consider
# your patch.
class Config(object):
"""Reads configuration from files or Splunk REST endpoint and stores them in a 'Borg' global.
Borg is a variation on the Singleton design pattern which allows us to continually instantiate
the configuration object throughout the application and maintain state."""
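    # Concretely: __init__ rebinds self.__dict__ to the shared class-level dict,
    # so e.g. setting Config().debug = True is immediately visible on every
    # other Config() instance in the process.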
DEFAULT_SAMPLE_DIR = "samples"
# Stolen from http://code.activestate.com/recipes/66531/
    # This implements the Borg pattern, similar to a Singleton
# It allows numerous instantiations but always shared state
__sharedState = {}
# Internal vars
_firsttime = True
_confDict = None
# Externally used vars
debug = False
verbosity = logging.ERROR
splunkEmbedded = False
sessionKey = None
grandparentdir = None
greatgrandparentdir = None
samples = []
sampleDir = None
outputWorkers = None
generatorWorkers = None
sampleTimers = []
# Config file options. We do not define defaults here, rather we pull them in
# from eventgen.conf.
# These are only options which are valid in the 'global' stanza
# 5/22 CS Except for blacklist, we define that in code, since splunk complains about it in
# the config files
threading = None
disabled = None
blacklist = r".*\.part"
__generatorworkers = []
__outputworkers = []
outputPlugins = {}
plugins = {}
outputQueue = None
generatorQueue = None
args = None
# Validations
_validSettings = [
"disabled",
"blacklist",
"spoolDir",
"spoolFile",
"breaker",
"sampletype",
"interval",
"delay",
"count",
"bundlelines",
"earliest",
"latest",
"eai:acl",
"hourOfDayRate",
"dayOfWeekRate",
"randomizeCount",
"randomizeEvents",
"outputMode",
"fileName",
"fileMaxBytes",
"fileBackupFiles",
"index",
"source",
"sourcetype",
"host",
"hostRegex",
"projectID",
"accessToken",
"mode",
"backfill",
"backfillSearch",
"eai:userName",
"eai:appName",
"timeMultiple",
"debug",
"minuteOfHourRate",
"timezone",
"dayOfMonthRate",
"monthOfYearRate",
"perDayVolume",
"outputWorkers",
"generator",
"rater",
"generatorWorkers",
"timeField",
"sampleDir",
"threading",
"profiler",
"maxIntervalsBeforeFlush",
"maxQueueLength",
"splunkMethod",
"splunkPort",
"syslogDestinationHost",
"syslogDestinationPort",
"syslogAddHeader",
"verbosity",
"useOutputQueue",
"seed",
"end",
"autotimestamps",
"autotimestamp",
"httpeventWaitResponse",
"outputCounter",
"sequentialTimestamp",
"extendIndexes",
"disableLoggingQueue",
"splitSample",
]
_validTokenTypes = {"token": 0, "replacementType": 1, "replacement": 2}
_validHostTokens = {"token": 0, "replacement": 1}
_validReplacementTypes = [
"static",
"timestamp",
"replaytimestamp",
"random",
"rated",
"file",
"mvfile",
"seqfile",
"integerid",
]
validOutputModes = []
_intSettings = [
"interval",
"outputWorkers",
"generatorWorkers",
"maxIntervalsBeforeFlush",
"maxQueueLength",
"splitSample",
"fileMaxBytes",
]
_floatSettings = ["randomizeCount", "delay", "timeMultiple"]
_boolSettings = [
"disabled",
"randomizeEvents",
"bundlelines",
"profiler",
"useOutputQueue",
"autotimestamp",
"httpeventWaitResponse",
"outputCounter",
"sequentialTimestamp",
"disableLoggingQueue",
"syslogAddHeader",
]
_jsonSettings = [
"hourOfDayRate",
"dayOfWeekRate",
"minuteOfHourRate",
"dayOfMonthRate",
"monthOfYearRate",
"autotimestamps",
]
_defaultableSettings = [
"disabled",
"spoolDir",
"spoolFile",
"breaker",
"sampletype",
"interval",
"delay",
"count",
"bundlelines",
"earliest",
"latest",
"hourOfDayRate",
"dayOfWeekRate",
"randomizeCount",
"randomizeEvents",
"outputMode",
"fileMaxBytes",
"fileBackupFiles",
"splunkHost",
"splunkPort",
"splunkMethod",
"index",
"source",
"sourcetype",
"host",
"hostRegex",
"projectID",
"accessToken",
"mode",
"minuteOfHourRate",
"timeMultiple",
"dayOfMonthRate",
"monthOfYearRate",
"perDayVolume",
"sessionKey",
"generator",
"rater",
"timeField",
"maxQueueLength",
"maxIntervalsBeforeFlush",
"autotimestamp",
"splitSample",
]
_complexSettings = {
"sampletype": ["raw", "csv"],
"mode": ["sample", "replay"],
"threading": ["thread", "process"],
}
def __init__(
self,
configfile=None,
sample=None,
override_outputter=False,
override_count=False,
override_interval=False,
override_backfill=False,
override_end=False,
threading="thread",
override_generators=None,
override_outputqueue=False,
profiler=False,
verbosity=40,
):
"""Setup Config object. Sets up Logging and path related variables."""
# Rebind the internal datastore of the class to an Instance variable
self.__dict__ = self.__sharedState
self.configfile = configfile
self.sample = sample
self.threading = threading
self.extraplugins = []
self.profiler = profiler
self.override_outputter = override_outputter
self.override_count = override_count
self.override_interval = override_interval
self.override_backfill = override_backfill
self.override_end = override_end
self.verbosity = verbosity
if override_generators is not None and override_generators >= 0:
self.generatorWorkers = override_generators
if override_outputqueue:
self.useOutputQueue = False
if self._firsttime:
# Determine some path names in our environment
self.grandparentdir = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
self.greatgrandparentdir = os.path.dirname(self.grandparentdir)
            # 1/11/14 CS Adding an initial config parsing step (this does this twice now, oh well, just runs once
# per execution) so that I can get config before calling parse()
c = RawConfigParser()
c.optionxform = str
c.read([os.path.join(self.grandparentdir, "default", "eventgen.conf")])
self._complexSettings["timezone"] = self._validateTimezone
self._complexSettings["seed"] = self._validateSeed
self.stopping = False
# self.copyLock = threading.Lock() if self.threading == 'thread' else multiprocessing.Lock()
self._firsttime = False
def __str__(self):
"""Only used for debugging, outputs a pretty printed representation of our Config"""
# Filter items from config we don't want to pretty print
filter_list = [
"samples",
"sampleTimers",
"__generatorworkers",
"__outputworkers",
]
# Eliminate recursive going back to parent
temp = dict(
[
(key, value)
for (key, value) in self.__dict__.items()
if key not in filter_list
]
)
return (
"Config:"
+ pprint.pformat(temp)
+ "\nSamples:\n"
+ pprint.pformat(self.samples)
)
def getPlugin(self, name, s=None):
"""Return a reference to a Python object (not an instance) referenced by passed name"""
"""
APPPERF-263:
make sure we look in __outputPlugins as well. For some reason we
keep 2 separate dicts of plugins.
"""
plugintype = name.split(".")[0]
if name not in self.plugins and name not in self.outputPlugins:
# 2/1/15 CS If we haven't already seen the plugin, try to load it
# Note, this will only work for plugins which do not specify config validation
# parameters. If they do, configs may not validate for user provided plugins.
if s:
if plugintype in ("generator", "rater"):
plugin = getattr(s, plugintype)
else:
plugin = getattr(s, "outputMode")
if plugin is not None:
logger.debug(
"Attempting to dynamically load plugintype '%s' named '%s' for sample '%s'"
% (plugintype, plugin, s.name)
)
bindir = os.path.join(s.sampleDir, os.pardir, "bin")
libdir = os.path.join(s.sampleDir, os.pardir, "lib")
plugindir = os.path.join(libdir, "plugins", plugintype)
targetplugin = PluginNotLoaded(
bindir=bindir,
libdir=libdir,
plugindir=plugindir,
name=plugin,
type=plugintype,
)
if targetplugin.name not in self.extraplugins:
self.extraplugins.append(targetplugin.name)
raise targetplugin
else:
raise FailedLoadingPlugin(name=plugin)
# APPPERF-263: consult both __outputPlugins and __plugins
if name not in self.plugins and name not in self.outputPlugins:
raise KeyError("Plugin " + name + " not found")
# return in order of precedence: __plugins, __outputPlugins, None
# Note: because of the above KeyError Exception we should never return
# None, but it is the sane behavior for a getter method
return self.plugins.get(name, self.outputPlugins.get(name, None))
def makeSplunkEmbedded(self, sessionKey):
self.sessionKey = sessionKey
self.splunkEmbedded = True
def getSplunkUrl(self, s):
"""
If we're embedded in Splunk, get it from Splunk's Python libraries, otherwise get it from config.
Returns a tuple of ( splunkUrl, splunkMethod, splunkHost, splunkPort )
"""
if self.splunkEmbedded:
try:
import splunk.auth
splunkUrl = splunk.auth.splunk.getLocalServerInfo()
results = re.match(r"(http|https)://([^:/]+):(\d+).*", splunkUrl)
splunkMethod = results.groups()[0]
splunkHost = results.groups()[1]
splunkPort = results.groups()[2]
except:
import traceback
trace = traceback.format_exc()
logger.error(
"Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s. Stacktrace: %s"
% (s.name, trace)
)
raise ValueError(
"Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s"
% s.name
)
else:
# splunkMethod and splunkPort are defaulted so only check for splunkHost
if s.splunkHost is None:
logger.error(
"Splunk URL Requested but splunkHost not set for sample '%s'"
% s.name
)
raise ValueError(
"Splunk URL Requested but splunkHost not set for sample '%s'"
% s.name
)
splunkUrl = "%s://%s:%s" % (s.splunkMethod, s.splunkHost, s.splunkPort)
splunkMethod = s.splunkMethod
splunkHost = s.splunkHost
splunkPort = s.splunkPort
logger.debug(
"Getting Splunk URL: %s Method: %s Host: %s Port: %s"
% (splunkUrl, splunkMethod, splunkHost, splunkPort)
)
return (splunkUrl, splunkMethod, splunkHost, splunkPort)
def parse(self):
"""Parse configs from Splunk REST Handler or from files.
        We get called manually instead of in __init__ because we need to find out if we're Splunk embedded before
we figure out how to configure ourselves.
"""
self.samples = []
logger.debug("Parsing configuration files.")
self._buildConfDict()
# Set defaults config instance variables to 'global' section
# This establishes defaults for other stanza settings
if "global" in self._confDict:
for key, value in self._confDict["global"].items():
value = self._validateSetting("global", key, value)
setattr(self, key, value)
del self._confDict["global"]
if "default" in self._confDict:
del self._confDict["default"]
tempsamples = []
tempsamples2 = []
stanza_map = {}
stanza_list = []
for stanza in self._confDict:
stanza_list.append(stanza)
stanza_map[stanza] = []
for stanza, settings in self._confDict.items():
for stanza_item in stanza_list:
if stanza != stanza_item and re.match(stanza, stanza_item):
stanza_map[stanza_item].append(stanza)
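        # Illustration (hypothetical stanza names): with stanzas "web.*" and
        # "web_access.sample", re.match("web.*", "web_access.sample") matches, so
        # stanza_map["web_access.sample"] == ["web.*"] and the wildcard stanza's
        # settings and tokens are later folded into the more specific sample below.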
# 1/16/16 CS Trying to clean up the need to have attributes hard coded into the Config object
# and instead go off the list of valid settings that could be set
for setting in self._validSettings:
if not hasattr(self, setting):
setattr(self, setting, None)
# Now iterate for the rest of the samples we've found
# We'll create Sample objects for each of them
for stanza, settings in self._confDict.items():
if self.sample is not None and self.sample != stanza:
logger.info(
"Skipping sample '%s' because of command line override", stanza
)
continue
sampleexists = False
for sample in self.samples:
if sample.name == stanza:
sampleexists = True
# If we see the sample in two places, use the first and ignore the second
if not sampleexists:
s = Sample(stanza)
s.splunkEmbedded = self.splunkEmbedded
s.updateConfig(self)
# Get the latest token number of the current stanza
last_token_number = 0
for key, value in settings.items():
                    # Use a regex so multi-digit token indices (e.g. token.10.token) are
                    # handled, matching the parsing done later in _validateSetting
                    token_index = re.match(r"token\.(\d+)\.", key)
                    if token_index and int(token_index.group(1)) > last_token_number:
                        last_token_number = int(token_index.group(1))
# Apply global tokens to the current stanza
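                # For illustration, a token in eventgen.conf is defined as a numbered
                # triple of settings (values here are hypothetical):
                #     token.0.token = \d{4}-\d{2}-\d{2}
                #     token.0.replacementType = timestamp
                #     token.0.replacement = %Y-%m-%d
                # The loop below copies such triples from any matching "global" stanza
                # into this sample, renumbering them after the sample's highest index.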
kv_pair_items = list(settings.items())
if stanza in stanza_map:
for global_stanza in stanza_map[stanza]:
i = 0
# Scan for tokens first
while True:
if (
"token.{}.token".format(i)
in self._confDict[global_stanza]
):
token = self._confDict[global_stanza].get(
"token.{}.token".format(i)
)
replacement = self._confDict[global_stanza].get(
"token.{}.replacement".format(i)
)
replacementType = self._confDict[global_stanza].get(
"token.{}.replacementType".format(i)
)
last_token_number += 1
if token:
k = "token.{}.token".format(last_token_number)
v = token
kv_pair_items.append((k, v))
if replacement:
k = "token.{}.replacement".format(last_token_number)
v = replacement
kv_pair_items.append((k, v))
if replacementType:
k = "token.{}.replacementType".format(
last_token_number
)
v = replacementType
kv_pair_items.append((k, v))
i += 1
else:
break
keys = list(settings.keys())
for k, v in self._confDict[global_stanza].items():
if "token" not in k and k not in keys:
kv_pair_items.append((k, v))
for key, value in kv_pair_items:
oldvalue = value
try:
value = self._validateSetting(stanza, key, value)
except ValueError:
# If we're improperly formatted, skip to the next item
continue
# If we're a tuple, then this must be a token
if type(value) == tuple:
# Token indices could be out of order, so we must check to
# see whether we have enough items in the list to update the token
# In general this will keep growing the list by whatever length we need
if key.find("host.") > -1:
# logger.info("hostToken.{} = {}".format(value[1],oldvalue))
if not isinstance(s.hostToken, Token):
s.hostToken = Token(s)
# default hard-coded for host replacement
s.hostToken.replacementType = "file"
setattr(s.hostToken, value[0], oldvalue)
else:
if len(s.tokens) <= value[0]:
x = (value[0] + 1) - len(s.tokens)
s.tokens.extend([None for num in range(0, x)])
if not isinstance(s.tokens[value[0]], Token):
s.tokens[value[0]] = Token(s)
# logger.info("token[{}].{} = {}".format(value[0],value[1],oldvalue))
setattr(s.tokens[value[0]], value[1], oldvalue)
elif key == "eai:acl":
setattr(s, "app", value["app"])
else:
setattr(s, key, value)
# 6/22/12 CS Need a way to show a setting was set by the original
# config read
s._lockedSettings.append(key)
# logger.debug("Appending '%s' to locked settings for sample '%s'" % (key, s.name))
# Validate all the tokens are fully setup, can't do this in _validateSettings
# because they come over multiple lines
# Don't error out at this point, just log it and remove the token and move on
deleteidx = []
for i in range(0, len(s.tokens)):
t = s.tokens[i]
# If the index doesn't exist at all
if t is None:
logger.error("Token at index %s invalid" % i)
# Can't modify list in place while we're looping through it
# so create a list to remove later
deleteidx.append(i)
elif (
t.token is None
or t.replacementType is None
or t.replacement is None
):
logger.error("Token at index %s invalid" % i)
deleteidx.append(i)
newtokens = []
for i in range(0, len(s.tokens)):
if i not in deleteidx:
newtokens.append(s.tokens[i])
s.tokens = newtokens
# Must have eai:acl key to determine app name which determines where actual files are
if s.app is None:
logger.error(
"App not set for sample '%s' in stanza '%s'" % (s.name, stanza)
)
raise ValueError(
"App not set for sample '%s' in stanza '%s'" % (s.name, stanza)
)
# Set defaults for items not included in the config file
for setting in self._defaultableSettings:
if not hasattr(s, setting) or getattr(s, setting) is None:
setattr(s, setting, getattr(self, setting, None))
# Append to temporary holding list
if not s.disabled:
s._priority = len(tempsamples) + 1
tempsamples.append(s)
        # 6/22/12 CS Rewriting the config matching code yet again to handle flattening better.
# In this case, we're now going to match all the files first, create a sample for each of them
# and then take the match from the sample seen last in the config file, and apply settings from
# every other match to that one.
for s in tempsamples:
# Now we need to match this up to real files. May generate multiple copies of the sample.
foundFiles = []
# 1/5/14 Adding a config setting to override sample directory, primarily so I can put tests in their own
# directories
if s.sampleDir is None:
logger.debug(
"Sample directory not specified in config, setting based on standard"
)
if self.splunkEmbedded and not STANDALONE:
s.sampleDir = os.path.normpath(
os.path.join(
self.grandparentdir,
os.path.pardir,
os.path.pardir,
os.path.pardir,
s.app,
self.DEFAULT_SAMPLE_DIR,
)
)
else:
# 2/1/15 CS Adding support for looking for samples based on the config file specified on
# the command line.
if self.configfile:
base_dir = (
os.path.dirname(self.configfile)
if os.path.isdir(self.configfile)
else os.path.dirname(os.path.dirname(self.configfile))
)
s.sampleDir = os.path.join(base_dir, self.DEFAULT_SAMPLE_DIR)
else:
s.sampleDir = os.path.join(os.getcwd(), self.DEFAULT_SAMPLE_DIR)
if not os.path.exists(s.sampleDir):
newSampleDir = os.path.join(
self.grandparentdir, self.DEFAULT_SAMPLE_DIR
)
logger.error(
"Path not found for samples '%s', trying '%s'"
% (s.sampleDir, newSampleDir)
)
s.sampleDir = newSampleDir
else:
if not os.path.isabs(s.sampleDir):
                    # For a relative path, use the conf file's directory as the base dir
logger.debug(
"Sample directory specified in config, checking for relative"
)
base_path = (
self.configfile
if os.path.isdir(self.configfile)
else os.path.dirname(self.configfile)
)
s.sampleDir = os.path.join(base_path, s.sampleDir)
# do nothing when sampleDir is absolute path
# 2/1/15 CS Adding support for command line options, specifically running a single sample
# from the command line
self.run_sample = True
if self.run_sample:
# Name doesn't match, disable
# if s.name != self.run_sample:
# logger.debug("Disabling sample '%s' because of command line override" % s.name)
# s.disabled = True
# # Name matches
# else:
# logger.debug("Sample '%s' selected from command line" % s.name)
# Also, can't backfill search if we don't know how to talk to Splunk
s.backfillSearch = None
s.backfillSearchUrl = None
                # Since the user is running this for debug output, let's assume that they
# always want to see output
self.maxIntervalsBeforeFlush = 1
s.maxIntervalsBeforeFlush = 1
s.maxQueueLength = s.maxQueueLength or 1
logger.debug(
"Sample '%s' setting maxQueueLength to '%s' from command line"
% (s.name, s.maxQueueLength)
)
if self.override_outputter:
logger.debug(
"Sample '%s' setting output to '%s' from command line"
% (s.name, self.override_outputter)
)
s.outputMode = self.override_outputter
if self.override_count:
logger.debug(
"Overriding count to '%d' for sample '%s'"
% (self.override_count, s.name)
)
s.count = self.override_count
# If we're specifying a count, turn off backfill
s.backfill = None
if self.override_interval:
logger.debug(
"Overriding interval to '%d' for sample '%s'"
% (self.override_interval, s.name)
)
s.interval = self.override_interval
if self.override_backfill:
logger.debug(
"Overriding backfill to '%s' for sample '%s'"
% (self.override_backfill, s.name)
)
s.backfill = self.override_backfill.lstrip()
if self.override_end:
logger.debug(
"Overriding end to '%s' for sample '%s'"
% (self.override_end, s.name)
)
s.end = self.override_end.lstrip()
if s.mode == "replay" and not s.end:
s.end = 1
# Now that we know where samples will be written,
# Loop through tokens and load state for any that are integerid replacementType
for token in s.tokens:
if token.replacementType == "integerid":
try:
stateFile = open(
os.path.join(
s.sampleDir,
"state."
+ six.moves.urllib.request.pathname2url(token.token),
),
"r",
)
token.replacement = stateFile.read()
stateFile.close()
                        except (IOError, ValueError):
                            # The file doesn't exist, use the default value in the config
                            pass
if os.path.exists(s.sampleDir):
sampleFiles = os.listdir(s.sampleDir)
for sample in sampleFiles:
sample_name = s.name
results = re.match(sample_name, sample)
if (
s.sampletype == "csv"
and not s.name.endswith(".csv")
and not results
):
logger.warning(
"Could not find target csv, try adding .csv into stanza title and filename"
)
if results:
# Make sure the stanza name/regex matches the entire file name
match_start, match_end = results.regs[0]
if match_end - match_start == len(sample):
logger.debug(
"Matched file {0} with sample name {1}".format(
results.group(0), s.name
)
)
# Store original name for future regex matching
s._origName = s.name
samplePath = os.path.join(s.sampleDir, sample)
if os.path.isfile(samplePath):
logger.debug(
"Found sample file '%s' for app '%s' using config '%s' with priority '%s'"
% (sample, s.app, s.name, s._priority)
+ "; adding to list"
)
foundFiles.append(samplePath)
# If we didn't find any files, log about it
if len(foundFiles) == 0:
logger.warning("Sample '%s' in config but no matching files" % s.name)
# 1/23/14 Change in behavior, go ahead and add the sample even if we don't find a file
# 9/16/15 Change bit us, now only append if we're a generator other than the two stock generators
if not s.disabled and not (
s.generator == "default" or s.generator == "replay"
):
tempsamples2.append(s)
for f in foundFiles:
if re.search(s._origName, f):
s.filePath = f
# 12/3/13 CS TODO These are hard coded but should be handled via the modular config system
# Maybe a generic callback for all plugins which will modify sample based on the filename
# found?
# Override <SAMPLE> with real name
if s.outputMode == "spool" and s.spoolFile == self.spoolFile:
s.spoolFile = f.split(os.sep)[-1]
if s.outputMode == "file" and s.fileName is None:
if self.fileName:
s.fileName = self.fileName
logger.debug(
"Found a global fileName {}. Setting the sample fileName.".format(
self.fileName
)
)
elif s.spoolFile == self.spoolFile:
s.fileName = os.path.join(s.spoolDir, f.split(os.sep)[-1])
elif s.spoolFile is not None:
s.fileName = os.path.join(s.spoolDir, s.spoolFile)
s.name = f.split(os.sep)[-1]
if not s.disabled:
tempsamples2.append(s)
else:
logger.info(
"Sample '%s' for app '%s' is marked disabled."
% (s.name, s.app)
)
# Clear tempsamples, we're going to reuse it
tempsamples = []
        # We're now going to go through the samples and attempt to apply any matches from other stanzas
# This allows us to specify a wildcard at the beginning of the file and get more specific as we go on
# Loop through all samples, create a list of the master samples
for s in tempsamples2:
foundHigherPriority = False
othermatches = []
# If we're an exact match, don't go looking for higher priorities
if not s.name == s._origName:
for matchs in tempsamples2:
if (
matchs.filePath == s.filePath
and s._origName != matchs._origName
):
# We have a match, now determine if we're higher priority or not
# If this is a longer pattern or our match is an exact match
# then we're a higher priority match
if (
len(matchs._origName) > len(s._origName)
or matchs.name == matchs._origName
):
# if s._priority < matchs._priority:
logger.debug(
"Found higher priority for sample '%s' with priority '%s' from sample "
% (s._origName, s._priority)
+ "'%s' with priority '%s'"
% (matchs._origName, matchs._priority)
)
foundHigherPriority = True
break
else:
othermatches.append(matchs._origName)
if not foundHigherPriority:
logger.debug(
"Chose sample '%s' from samples '%s' for file '%s'"
% (s._origName, othermatches, s.name)
)
tempsamples.append(s)
# Now we have two lists, tempsamples which contains only the highest priority matches, and
# tempsamples2 which contains all matches. We need to now flatten the config in order to
# take all the configs which might match.
# Reversing tempsamples2 in order to look from the bottom of the file towards the top
# We want entries lower in the file to override entries higher in the file
tempsamples2.reverse()
# Loop through all samples
for s in tempsamples:
# Now loop through the samples we've matched with files to see if we apply to any of them
for overridesample in tempsamples2:
if (
s.filePath == overridesample.filePath
and s._origName != overridesample._origName
):
# Now we're going to loop through all valid settings and set them assuming
# the more specific object that we've matched doesn't already have them set
for settingname in self._validSettings:
if settingname not in [
"eai:acl",
"blacklist",
"disabled",
"name",
]:
# 7/16/14 CS For some reason default settings are suddenly erroring
                            # not sure why, but let's just move on
try:
sourcesetting = getattr(overridesample, settingname)
destsetting = getattr(s, settingname)
# We want to check that the setting we're copying to hasn't been
# set, otherwise keep the more specific value
# 6/22/12 CS Added support for non-overrideable (locked) settings
# logger.debug("Locked settings: %s" % pprint.pformat(matchs._lockedSettings))
# if settingname in matchs._lockedSettings:
# logger.debug("Matched setting '%s' in sample '%s' lockedSettings"
# % (settingname, matchs.name))
if (
(
destsetting is None
or destsetting == getattr(self, settingname)
)
and sourcesetting is not None
and sourcesetting != getattr(self, settingname)
and settingname not in s._lockedSettings
):
logger.debug(
"Overriding setting '%s' with value '%s' from sample '%s' to "
% (
settingname,
sourcesetting,
overridesample._origName,
)
+ "sample '%s' in app '%s'" % (s.name, s.app)
)
setattr(s, settingname, sourcesetting)
except AttributeError:
pass
# Now prepend all the tokens to the beginning of the list so they'll be sure to match first
newtokens = s.tokens
# logger.debug("Prepending tokens from sample '%s' to sample '%s' in app '%s': %s" \
# % (overridesample._origName, s.name, s.app, pprint.pformat(newtokens)))
newtokens.extend(overridesample.tokens)
s.tokens = newtokens
        # We've added replay mode, so let's loop through the samples again and set the earliest and latest
        # settings for any samples that were set to replay mode
for s in tempsamples:
if s.perDayVolume:
logger.info(
"Stanza contains per day volume, changing rater and generator to perdayvolume instead of count"
)
s.rater = "perdayvolume"
s.count = 1
s.generator = "perdayvolumegenerator"
elif s.mode == "replay":
logger.debug("Setting defaults for replay samples")
s.earliest = "now" if not s.earliest else s.earliest
s.latest = "now" if not s.latest else s.latest
s.count = 1
s.randomizeCount = None
s.hourOfDayRate = None
s.dayOfWeekRate = None
s.minuteOfHourRate = None
s.interval = 0 if not s.interval else s.interval
# 12/29/13 CS Moved replay generation to a new replay generator plugin
s.generator = "replay"
# 5/14/20 - Instead of using a static default source, leave source empty by default and
# set it to the sample file name unless otherwise specified.
if not s.source:
sample_path = s.filePath if s.filePath else s.generator
s.source = os.path.basename(sample_path)
self.samples = tempsamples
self._confDict = None
# 9/2/15 Try autotimestamp values, add a timestamp if we find one
for s in self.samples:
if s.generator == "default":
s.loadSample()
if s.autotimestamp:
at = self.autotimestamps
line_puncts = []
# Check for _time field, if it exists, add a timestamp to support it
if len(s.sampleDict) > 0:
if "_time" in s.sampleDict[0]:
logger.debug(
"Found _time field, checking if default timestamp exists"
)
t = Token()
t.token = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}"
t.replacementType = "timestamp"
t.replacement = "%Y-%m-%dT%H:%M:%S.%f"
found_token = False
# Check to see if we're already a token
for st in s.tokens:
if (
st.token == t.token
and st.replacement == t.replacement
):
found_token = True
break
if not found_token:
logger.debug("Found _time adding timestamp to support")
s.tokens.append(t)
else:
logger.debug(
"_time field exists and timestamp already configured"
)
for e in s.sampleDict:
# Run punct against the line, make sure we haven't seen this same pattern
# Not totally exact but good enough for Rock'N'Roll
p = self._punct(e["_raw"])
logger.debug(
"Got punct of '%s' for event '%s'" % (p, e[s.timeField])
)
if p not in line_puncts:
for x in at:
t = Token()
t.token = x[0]
t.replacementType = "timestamp"
t.replacement = x[1]
try:
logger.debug(
"Trying regex '%s' for format '%s' on '%s'"
% (x[0], x[1], e[s.timeField])
)
ts = s.getTSFromEvent(e["_raw"], t)
if type(ts) == datetime.datetime:
found_token = False
# Check to see if we're already a token
for st in s.tokens:
if (
st.token == t.token
and st.replacement == t.replacement
):
found_token = True
break
if not found_token:
logger.debug(
"Found timestamp '%s', extending token with format '%s'"
% (x[0], x[1])
)
s.tokens.append(t)
# Drop this pattern from ones we try in the future
at = [z for z in at if z[0] != x[0]]
break
except ValueError:
pass
line_puncts.append(p)
logger.debug("Finished parsing")
def _punct(self, string):
"""Quick method of attempting to normalize like events"""
string = string.replace("\\", "\\\\")
string = string.replace('"', '\\"')
string = string.replace("'", "\\'")
string = string.replace(" ", "_")
string = string.replace("\t", "t")
string = re.sub(
r"[^,;\-#\$%&+./:=\?@\\\'|*\n\r\"(){}<>\[\]\^!]", "", string, flags=re.M
)
return string
def _validateSetting(self, stanza, key, value):
"""Validates settings to ensure they won't cause errors further down the line.
Returns a parsed value (if the value is something other than a string).
If we've read a token, which is a complex config, returns a tuple of parsed values."""
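        # Examples of the return shapes handled below (illustrative):
        #   key "token.0.replacementType", value "timestamp" -> (0, "replacementType")
        #   key "host.token", value "..."                    -> ("token", value)
        #   key "interval", value "10"                       -> 10 (coerced to int)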
logger.debug(
"Validating setting for '%s' with value '%s' in stanza '%s'"
% (key, value, stanza)
)
if key.find("token.") > -1:
results = re.match(r"token\.(\d+)\.(\w+)", key)
if results is not None:
groups = results.groups()
if groups[1] not in self._validTokenTypes:
logger.error(
"Could not parse token index '%s' token type '%s' in stanza '%s'"
% (groups[0], groups[1], stanza)
)
raise ValueError(
"Could not parse token index '%s' token type '%s' in stanza '%s'"
% (groups[0], groups[1], stanza)
)
if groups[1] == "replacementType":
if value not in self._validReplacementTypes:
logger.error(
"Invalid replacementType '%s' for token index '%s' in stanza '%s'"
% (value, groups[0], stanza)
)
raise ValueError(
"Could not parse token index '%s' token type '%s' in stanza '%s'"
% (groups[0], groups[1], stanza)
)
return int(groups[0]), groups[1]
elif key.find("host.") > -1:
results = re.match(r"host\.(\w+)", key)
if results is not None:
groups = results.groups()
if groups[0] not in self._validHostTokens:
logger.error(
"Could not parse host token type '%s' in stanza '%s'"
% (groups[0], stanza)
)
raise ValueError(
"Could not parse host token type '%s' in stanza '%s'"
% (groups[0], stanza)
)
return groups[0], value
elif key in self._validSettings:
if key in self._intSettings:
try:
value = int(value)
except:
logger.error(
"Could not parse int for '%s' in stanza '%s'" % (key, stanza)
)
raise ValueError(
"Could not parse int for '%s' in stanza '%s'" % (key, stanza)
)
elif key in self._floatSettings:
try:
value = float(value)
except:
logger.error(
"Could not parse float for '%s' in stanza '%s'" % (key, stanza)
)
raise ValueError(
"Could not parse float for '%s' in stanza '%s'" % (key, stanza)
)
elif key in self._boolSettings:
try:
# Splunk gives these to us as a string '0' which bool thinks is True
# ConfigParser gives 'false', so adding more strings
if value in ("0", "false", "False"):
value = 0
value = bool(value)
except:
logger.error(
"Could not parse bool for '%s' in stanza '%s'" % (key, stanza)
)
raise ValueError(
"Could not parse bool for '%s' in stanza '%s'" % (key, stanza)
)
elif key in self._jsonSettings:
try:
value = json.loads(value)
except:
logger.error(
"Could not parse json for '%s' in stanza '%s'" % (key, stanza)
)
raise ValueError(
"Could not parse json for '%s' in stanza '%s'" % (key, stanza)
)
# 12/3/13 CS Adding complex settings, which is a dictionary with the key containing
# the config item name and the value is a list of valid values or a callback function
# which will parse the value or raise a ValueError if it is unparseable
elif key in self._complexSettings:
complexSetting = self._complexSettings[key]
logger.debug("Complex setting for '%s' in stanza '%s'" % (key, stanza))
# Set value to result of callback, e.g. parsed, or the function should raise an error
if isinstance(complexSetting, types.FunctionType) or isinstance(
complexSetting, types.MethodType
):
logger.debug(
"Calling function for setting '%s' with value '%s'"
% (key, value)
)
value = complexSetting(value)
elif isinstance(complexSetting, list):
if key == "threading" and self.threading == "process":
value = self.threading
if value not in complexSetting:
logger.error(
"Setting '%s' is invalid for value '%s' in stanza '%s'"
% (key, value, stanza)
)
raise ValueError(
"Setting '%s' is invalid for value '%s' in stanza '%s'"
% (key, value, stanza)
)
else:
# Notifying only if the setting isn't valid and continuing on
# This will allow future settings to be added and be backwards compatible
logger.info(
"Key '%s' in stanza '%s' may not be a valid setting" % (key, stanza)
)
return value
def _validateTimezone(self, value):
"""Callback for complexSetting timezone which will parse and validate the timezone"""
logger.debug("Parsing timezone {}".format(value))
if value.find("local") >= 0:
value = datetime.timedelta(days=1)
else:
try:
# Separate the hours and minutes (note: minutes = the int value - the hour portion)
if int(value) > 0:
mod = 100
else:
mod = -100
value = datetime.timedelta(
hours=int(int(value) / 100.0), minutes=int(value) % mod
)
except:
logger.error("Could not parse timezone {}".format(value))
raise ValueError("Could not parse timezone {}".format(value))
logger.debug("Parsed timezone {}".format(value))
return value
def _validateSeed(self, value):
"""Callback to set random seed"""
logger.debug("Validating random seed {}".format(value))
try:
value = int(value)
except:
logger.error("Could not parse int for seed {}".format(value))
raise ValueError("Could not parse int for seed {}".format(value))
logger.info("Using random seed {}".format(value))
        random.seed(value)
        return value
def _buildConfDict(self):
"""Build configuration dictionary that we will use """
# Abstracts grabbing configuration from Splunk or directly from Configuration Files
if self.splunkEmbedded and not STANDALONE:
logger.info("Retrieving eventgen configurations from /configs/eventgen")
import splunk.entity as entity
self._confDict = entity.getEntities(
"configs/conf-eventgen", count=-1, sessionKey=self.sessionKey
)
else:
logger.info("Retrieving eventgen configurations with ConfigParser()")
# We assume we're in a bin directory and that there are default and local directories
conf = RawConfigParser()
# Make case sensitive
conf.optionxform = str
conffiles = []
# 2/1/15 CS Moving to argparse way of grabbing command line parameters
if self.configfile:
if os.path.exists(self.configfile):
# 2/1/15 CS Adding a check to see whether we're instead passed a directory
# In which case we'll assume it's a splunk app and look for config files in
# default and local
if os.path.isdir(self.configfile):
conffiles = [
os.path.join(
self.grandparentdir, "default", "eventgen.conf"
),
os.path.join(self.configfile, "default", "eventgen.conf"),
os.path.join(self.configfile, "local", "eventgen.conf"),
]
else:
conffiles = [
os.path.join(
self.grandparentdir, "default", "eventgen.conf"
),
self.configfile,
]
if len(conffiles) == 0:
conffiles = [
os.path.join(self.grandparentdir, "default", "eventgen.conf"),
os.path.join(self.grandparentdir, "local", "eventgen.conf"),
]
logger.debug(
"Reading configuration files for non-splunkembedded: %s" % conffiles
)
conf.read(conffiles)
sections = conf.sections()
ret = {}
for section in sections:
ret[section] = dict(conf.items(section))
# For compatibility with Splunk's configs, need to add the app name to an eai:acl key
ret[section]["eai:acl"] = {"app": self.grandparentdir.split(os.sep)[-1]}
self._confDict = ret
logger.debug("ConfDict returned %s" % pprint.pformat(dict(self._confDict)))
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on opportunity models."""
from __future__ import annotations
import collections
import logging
from core.constants import constants
from core.domain import exp_fetchers
from core.domain import opportunity_domain
from core.domain import question_fetchers
from core.domain import story_fetchers
from core.domain import suggestion_services
from core.domain import topic_fetchers
from core.platform import models
(opportunity_models, suggestion_models) = models.Registry.import_models(
[models.NAMES.opportunity, models.NAMES.suggestion])
# NOTE TO DEVELOPERS: The functions:
# - delete_all_exploration_opportunity_summary_models()
# - delete_all_skill_opportunity_models()
# were removed in #13021 as part of the migration to Apache Beam. Please refer
# to that PR if you need to reinstate them.
def is_exploration_available_for_contribution(exp_id):
"""Checks whether a given exploration id belongs to a curated list of
exploration i.e, whether it's used as the chapter of any story.
Args:
exp_id: str. The id of the exploration which is needed to be checked.
Returns:
bool. Whether the given exp_id belongs to the curated explorations.
"""
model = opportunity_models.ExplorationOpportunitySummaryModel.get(
exp_id, strict=False)
return model is not None
def get_exploration_opportunity_summary_from_model(model):
"""Returns the ExplorationOpportunitySummary object out of the model.
Args:
model: ExplorationOpportunitySummaryModel. The exploration opportunity
summary model.
Returns:
ExplorationOpportunitySummary. The corresponding
ExplorationOpportunitySummary object.
"""
# We're making sure that the audio language codes in any exploration
# opportunity domain object match the ones in
# constants.SUPPORTED_AUDIO_LANGUAGES.
set_of_all_languages = set(
model.incomplete_translation_language_codes +
model.language_codes_needing_voice_artists +
model.language_codes_with_assigned_voice_artists)
supported_language_codes = set(
language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES)
missing_language_codes = list(
supported_language_codes - set_of_all_languages)
if missing_language_codes:
logging.info(
'Missing language codes %s in exploration opportunity model with '
'id %s' % (missing_language_codes, model.id))
new_incomplete_translation_language_codes = (
model.incomplete_translation_language_codes + missing_language_codes)
return opportunity_domain.ExplorationOpportunitySummary(
model.id, model.topic_id, model.topic_name, model.story_id,
model.story_title, model.chapter_title, model.content_count,
new_incomplete_translation_language_codes, model.translation_counts,
model.language_codes_needing_voice_artists,
model.language_codes_with_assigned_voice_artists,
{})
def _save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list):
"""Stores multiple ExplorationOpportunitySummary into datastore as a
ExplorationOpportunitySummaryModel.
Args:
exploration_opportunity_summary_list: list(
ExplorationOpportunitySummary). A list of exploration opportunity
            summary objects.
"""
exploration_opportunity_summary_model_list = []
for opportunity_summary in exploration_opportunity_summary_list:
model = opportunity_models.ExplorationOpportunitySummaryModel(
id=opportunity_summary.id,
topic_id=opportunity_summary.topic_id,
topic_name=opportunity_summary.topic_name,
story_id=opportunity_summary.story_id,
story_title=opportunity_summary.story_title,
chapter_title=opportunity_summary.chapter_title,
content_count=opportunity_summary.content_count,
incomplete_translation_language_codes=(
opportunity_summary.incomplete_translation_language_codes),
translation_counts=opportunity_summary.translation_counts,
language_codes_needing_voice_artists=(
opportunity_summary.language_codes_needing_voice_artists),
language_codes_with_assigned_voice_artists=(
opportunity_summary.language_codes_with_assigned_voice_artists)
)
exploration_opportunity_summary_model_list.append(model)
(
opportunity_models.ExplorationOpportunitySummaryModel
.update_timestamps_multi(exploration_opportunity_summary_model_list))
opportunity_models.ExplorationOpportunitySummaryModel.put_multi(
exploration_opportunity_summary_model_list)
def create_exp_opportunity_summary(topic, story, exploration):
"""Create an ExplorationOpportunitySummary object with the given topic,
story and exploration object.
Args:
topic: Topic. The topic object to which the opportunity belongs.
story: Story. The story object to which the opportunity belongs.
exploration: Exploration. The exploration object to which the
opportunity belongs.
Returns:
ExplorationOpportunitySummary. The exploration opportunity summary
object.
"""
# TODO(#13903): Find a way to reduce runtime of computing the complete
# languages.
complete_translation_language_list = (
exploration.get_languages_with_complete_translation())
# TODO(#13912): Revisit voiceover language logic.
language_codes_needing_voice_artists = set(
complete_translation_language_list)
incomplete_translation_language_codes = (
_compute_exploration_incomplete_translation_languages(
complete_translation_language_list))
if exploration.language_code in incomplete_translation_language_codes:
# Remove exploration language from incomplete translation languages list
# as an exploration does not need a translation in its own language.
incomplete_translation_language_codes.remove(exploration.language_code)
# Add exploration language to voiceover required languages list as an
# exploration can be voiceovered in its own language.
language_codes_needing_voice_artists.add(exploration.language_code)
content_count = exploration.get_content_count()
translation_counts = exploration.get_translation_counts()
story_node = story.story_contents.get_node_with_corresponding_exp_id(
exploration.id)
# TODO(#7376): Once the voiceover application functionality is
# implemented change this method such that it also populates the
# language_codes_with_assigned_voice_artists with the required data.
exploration_opportunity_summary = (
opportunity_domain.ExplorationOpportunitySummary(
exploration.id, topic.id, topic.name, story.id, story.title,
story_node.title, content_count,
incomplete_translation_language_codes,
translation_counts, list(language_codes_needing_voice_artists), [],
{}))
return exploration_opportunity_summary
def _compute_exploration_incomplete_translation_languages(
complete_translation_languages):
"""Computes all languages that are not 100% translated in an exploration.
Args:
complete_translation_languages: list(str). List of complete translation
language codes in the exploration.
Returns:
list(str). List of incomplete translation language codes sorted
alphabetically.
"""
audio_language_codes = set(
language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES)
incomplete_translation_language_codes = (
audio_language_codes - set(complete_translation_languages))
return sorted(list(incomplete_translation_language_codes))
def add_new_exploration_opportunities(story_id, exp_ids):
"""Adds new exploration opportunity into the model.
Args:
story_id: str. ID of the story.
exp_ids: list(str). A list of exploration ids for which new
opportunities are to be created. All exp_ids must be part of the
given story.
"""
story = story_fetchers.get_story_by_id(story_id)
topic = topic_fetchers.get_topic_by_id(story.corresponding_topic_id)
_create_exploration_opportunities(story, topic, exp_ids)
def _create_exploration_opportunities(story, topic, exp_ids):
"""Creates new exploration opportunities corresponding to the supplied
story, topic, and exploration IDs.
Args:
story: Story. The story domain object corresponding to the exploration
opportunities.
topic: Topic. The topic domain object corresponding to the exploration
opportunities.
exp_ids: list(str). A list of exploration ids for which new
opportunities are to be created. All exp_ids must be part of the
given story.
"""
explorations = exp_fetchers.get_multiple_explorations_by_id(exp_ids)
exploration_opportunity_summary_list = []
for exploration in explorations.values():
exploration_opportunity_summary_list.append(
create_exp_opportunity_summary(
topic, story, exploration))
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def update_opportunity_with_updated_exploration(exp_id):
"""Updates the opportunities models with the changes made in the
exploration.
Args:
exp_id: str. The exploration id which is also the id of the opportunity
model.
"""
updated_exploration = exp_fetchers.get_exploration_by_id(exp_id)
content_count = updated_exploration.get_content_count()
translation_counts = updated_exploration.get_translation_counts()
# TODO(#13903): Find a way to reduce runtime of computing the complete
# languages.
complete_translation_language_list = (
updated_exploration.get_languages_with_complete_translation())
model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(model))
exploration_opportunity_summary.content_count = content_count
exploration_opportunity_summary.translation_counts = translation_counts
incomplete_translation_language_codes = (
_compute_exploration_incomplete_translation_languages(
complete_translation_language_list))
if (
updated_exploration.language_code
in incomplete_translation_language_codes):
# Remove exploration language from incomplete translation languages list
# as an exploration does not need a translation in its own language.
incomplete_translation_language_codes.remove(
updated_exploration.language_code)
exploration_opportunity_summary.incomplete_translation_language_codes = (
incomplete_translation_language_codes)
new_languages_for_voiceover = set(complete_translation_language_list) - set(
exploration_opportunity_summary.
language_codes_with_assigned_voice_artists)
    # We only append new languages to language_codes_needing_voice_artists
    # (instead of adding all of complete_translation_language_list), because the
    # set of completely translated languages changes dynamically whenever content
    # text changes, whereas voiceover is long-term work and we can allow a voice
    # artist to keep working on an exploration that only needs a small update to
    # its text translation.
language_codes_needing_voice_artists_set = set(
exploration_opportunity_summary.language_codes_needing_voice_artists)
language_codes_needing_voice_artists_set |= set(new_languages_for_voiceover)
exploration_opportunity_summary.language_codes_needing_voice_artists = list(
language_codes_needing_voice_artists_set)
exploration_opportunity_summary.validate()
_save_multi_exploration_opportunity_summary(
[exploration_opportunity_summary])
def update_exploration_opportunities_with_story_changes(story, exp_ids):
"""Updates the opportunities models with the story changes.
Args:
story: Story. The new story object.
exp_ids: list(str). A list of exploration IDs whose exploration
opportunity summary models need to be updated.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
exp_ids))
exploration_opportunity_summary_list = []
for exp_opportunity_model in exp_opportunity_models:
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_model))
exploration_opportunity_summary.story_title = story.title
node = story.story_contents.get_node_with_corresponding_exp_id(
exploration_opportunity_summary.id)
exploration_opportunity_summary.chapter_title = node.title
exploration_opportunity_summary.validate()
exploration_opportunity_summary_list.append(
exploration_opportunity_summary)
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def update_exploration_voiceover_opportunities(
exp_id, assigned_voice_artist_in_language_code):
"""Updates the language_codes_with_assigned_voice_artists of exploration
opportunity model.
Args:
exp_id: str. The ID of the exploration.
assigned_voice_artist_in_language_code: str. The language code in which
a voice artist is assigned to the exploration.
"""
model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(model))
exploration_opportunity_summary.language_codes_needing_voice_artists.remove(
assigned_voice_artist_in_language_code)
(
exploration_opportunity_summary
.language_codes_with_assigned_voice_artists.append(
assigned_voice_artist_in_language_code))
exploration_opportunity_summary.validate()
_save_multi_exploration_opportunity_summary(
[exploration_opportunity_summary])
def delete_exploration_opportunities(exp_ids):
"""Deletes the ExplorationOpportunitySummaryModel models corresponding to
the given exp_ids.
Args:
exp_ids: list(str). A list of exploration IDs whose opportunity summary
models are to be deleted.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
exp_ids))
exp_opportunity_models_to_be_deleted = [
model for model in exp_opportunity_models
if model is not None]
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models_to_be_deleted)
def delete_exploration_opportunities_corresponding_to_topic(topic_id):
"""Deletes the ExplorationOpportunitySummaryModel models which corresponds
to the given topic_id.
Args:
topic_id: str. The ID of the topic.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models)
def update_exploration_opportunities(old_story, new_story):
"""Updates the opportunities models according to the changes made in the
story.
Args:
old_story: Story. The old story object which is now updated.
new_story: Story. The new story object.
"""
model_ids_need_update = set([])
exp_ids_in_old_story = old_story.story_contents.get_all_linked_exp_ids()
exp_ids_in_new_story = new_story.story_contents.get_all_linked_exp_ids()
new_added_exp_ids = set(exp_ids_in_new_story) - set(exp_ids_in_old_story)
deleted_exp_ids = set(exp_ids_in_old_story) - set(exp_ids_in_new_story)
unchanged_exp_ids = set(exp_ids_in_new_story) - new_added_exp_ids
if old_story.title != new_story.title:
model_ids_need_update |= set(unchanged_exp_ids)
else:
for exp_id in unchanged_exp_ids:
new_node = (
new_story.story_contents.get_node_with_corresponding_exp_id(
exp_id))
old_node = (
old_story.story_contents.get_node_with_corresponding_exp_id(
exp_id))
if old_node.title != new_node.title:
model_ids_need_update.add(exp_id)
update_exploration_opportunities_with_story_changes(
new_story, list(model_ids_need_update))
add_new_exploration_opportunities(new_story.id, new_added_exp_ids)
delete_exploration_opportunities(list(deleted_exp_ids))
def delete_exp_opportunities_corresponding_to_story(story_id):
"""Deletes the ExplorationOpportunitySummaryModel models which corresponds
to the given story_id.
Args:
story_id: str. The ID of the story.
"""
    exp_opportunity_model_class = (
        opportunity_models.ExplorationOpportunitySummaryModel)
    exp_opportunity_models = exp_opportunity_model_class.get_all().filter(
        exp_opportunity_model_class.story_id == story_id
    )
    exp_opportunity_model_class.delete_multi(exp_opportunity_models)
def get_translation_opportunities(language_code, topic_name, cursor):
"""Returns a list of opportunities available for translation in a specific
language.
Args:
        language_code: str. The language for which translation opportunities
            should be fetched.
        topic_name: str or None. The topic for which translation opportunities
            should be fetched. If topic_name is None or empty, fetch
            translation opportunities from all topics.
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(ExplorationOpportunitySummary). A list of
ExplorationOpportunitySummary domain objects.
cursor: str or None. A query cursor pointing to the next batch of
results. If there are no more results, this might be None.
more: bool. If True, there are (probably) more results after this
batch. If False, there are no further results after this batch.
"""
page_size = constants.OPPORTUNITIES_PAGE_SIZE
exp_opportunity_summary_models, cursor, more = (
opportunity_models
.ExplorationOpportunitySummaryModel.get_all_translation_opportunities(
page_size, cursor, language_code, topic_name))
opportunity_summaries = []
opportunity_summary_exp_ids = [
opportunity.id for opportunity in exp_opportunity_summary_models]
exp_id_to_in_review_count = {}
if len(opportunity_summary_exp_ids) > 0:
exp_id_to_in_review_count = (
_build_exp_id_to_translation_suggestion_in_review_count(
opportunity_summary_exp_ids, language_code))
for exp_opportunity_summary_model in exp_opportunity_summary_models:
opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
if opportunity_summary.id in exp_id_to_in_review_count:
# Compute the translation_in_review_counts domain object field
# adhoc. Note that this field is not persisted and is only used in
# the frontend.
# TODO(#14833): Compute this value in the backend controller
# instead.
opportunity_summary.translation_in_review_counts = {
language_code: exp_id_to_in_review_count[opportunity_summary.id]
}
opportunity_summaries.append(opportunity_summary)
return opportunity_summaries, cursor, more
def _build_exp_id_to_translation_suggestion_in_review_count(
exp_ids, language_code):
"""Returns a dict mapping exploration ID to the count of corresponding
translation suggestions that are currently in review.
Args:
exp_ids: list(str). List of exploration IDs for which to count
corresponding translations suggestions.
language_code: str. The language for which translation suggestions
should be fetched.
Returns:
dict(str, int). Dict of exploration IDs to counts of corresponding
translation suggestions currently in review.
"""
exp_id_to_in_review_count = collections.defaultdict(int)
suggestions_in_review = (
suggestion_services
.get_translation_suggestions_in_review_by_exp_ids(
exp_ids, language_code))
for suggestion in suggestions_in_review:
if suggestion is not None:
exp_id_to_in_review_count[suggestion.target_id] += 1
return exp_id_to_in_review_count
def get_voiceover_opportunities(language_code, cursor):
"""Returns a list of opportunities available for voiceover in a specific
language.
Args:
        language_code: str. The language for which voiceover opportunities
            are to be fetched.
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(ExplorationOpportunitySummary). A list of
ExplorationOpportunitySummary domain objects.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
page_size = constants.OPPORTUNITIES_PAGE_SIZE
exp_opportunity_summary_models, new_cursor, more = (
opportunity_models.ExplorationOpportunitySummaryModel
.get_all_voiceover_opportunities(page_size, cursor, language_code))
opportunities = []
for exp_opportunity_summary_model in exp_opportunity_summary_models:
exp_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
opportunities.append(exp_opportunity_summary)
return opportunities, new_cursor, more
def get_exploration_opportunity_summaries_by_ids(ids):
"""Returns a dict with key as id and value representing
ExplorationOpportunitySummary objects corresponding to the opportunity id.
Args:
ids: list(str). A list of opportunity ids.
Returns:
        dict(str, ExplorationOpportunitySummary|None). A dict mapping each
        opportunity id to its corresponding ExplorationOpportunitySummary
        domain object if it exists, else None.
"""
opportunities = {opportunity_id: None for opportunity_id in ids}
exp_opportunity_summary_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(ids))
for exp_opportunity_summary_model in exp_opportunity_summary_models:
if exp_opportunity_summary_model is not None:
opportunities[exp_opportunity_summary_model.id] = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
return opportunities
def update_opportunities_with_new_topic_name(topic_id, topic_name):
"""Updates the exploration opportunity summary models with new topic name.
Args:
topic_id: str. The corresponding topic id of the opportunity.
topic_name: str. The new topic name.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
exploration_opportunity_summary_list = []
for exp_opportunity_model in exp_opportunity_models:
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_model))
exploration_opportunity_summary.topic_name = topic_name
exploration_opportunity_summary.validate()
exploration_opportunity_summary_list.append(
exploration_opportunity_summary)
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def get_skill_opportunity_from_model(model):
"""Returns a SkillOpportunity domain object from a SkillOpportunityModel.
Args:
model: SkillOpportunityModel. The skill opportunity model.
Returns:
SkillOpportunity. The corresponding SkillOpportunity object.
"""
return opportunity_domain.SkillOpportunity(
model.id, model.skill_description, model.question_count)
def get_skill_opportunities(cursor):
"""Returns a list of skill opportunities available for questions.
Args:
cursor: str or None. If provided, the list of returned entities
starts from this datastore cursor. Otherwise, the returned
entities start from the beginning of the full list of entities.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(SkillOpportunity). A list of SkillOpportunity
domain objects.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
skill_opportunity_models, cursor, more = (
opportunity_models.SkillOpportunityModel
.get_skill_opportunities(constants.OPPORTUNITIES_PAGE_SIZE, cursor))
opportunities = []
for skill_opportunity_model in skill_opportunity_models:
skill_opportunity = (
get_skill_opportunity_from_model(skill_opportunity_model))
opportunities.append(skill_opportunity)
return opportunities, cursor, more
def get_skill_opportunities_by_ids(ids):
"""Returns a list of SkillOpportunity domain objects corresponding to the
given list of ids.
Args:
ids: list(str). A list of the opportunity ids.
Returns:
        dict(str, SkillOpportunity|None). A dict mapping each opportunity id
        to its corresponding SkillOpportunity domain object if it exists,
        else None.
"""
opportunities = {opportunity_id: None for opportunity_id in ids}
skill_opportunity_models = (
opportunity_models.SkillOpportunityModel.get_multi(ids))
for skill_opportunity_model in skill_opportunity_models:
if skill_opportunity_model is not None:
opportunities[skill_opportunity_model.id] = (
get_skill_opportunity_from_model(skill_opportunity_model))
return opportunities
def create_skill_opportunity(skill_id, skill_description):
"""Creates a SkillOpportunityModel entity in the datastore.
Args:
skill_id: str. The skill_id of the opportunity.
skill_description: str. The skill_description of the opportunity.
Raises:
Exception. If a SkillOpportunityModel corresponding to the supplied
skill_id already exists.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
raise Exception(
'SkillOpportunity corresponding to skill ID %s already exists.' % (
skill_id))
questions, _ = (
question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
constants.MAX_QUESTIONS_PER_SKILL, [skill_id], 0))
skill_opportunity = opportunity_domain.SkillOpportunity(
skill_id=skill_id,
skill_description=skill_description,
question_count=len(questions)
)
_save_skill_opportunities([skill_opportunity])
def _save_skill_opportunities(skill_opportunities):
"""Saves SkillOpportunity domain objects into datastore as
SkillOpportunityModel objects.
Args:
skill_opportunities: list(SkillOpportunity). A list of SkillOpportunity
domain objects.
"""
skill_opportunity_models = []
for skill_opportunity in skill_opportunities:
skill_opportunity.validate()
model = opportunity_models.SkillOpportunityModel(
id=skill_opportunity.id,
skill_description=skill_opportunity.skill_description,
question_count=skill_opportunity.question_count,
)
skill_opportunity_models.append(model)
opportunity_models.SkillOpportunityModel.update_timestamps_multi(
skill_opportunity_models)
opportunity_models.SkillOpportunityModel.put_multi(skill_opportunity_models)
def update_skill_opportunity_skill_description(skill_id, new_description):
"""Updates the skill_description of the SkillOpportunityModel with
new_description.
Args:
skill_id: str. The corresponding skill_id of the opportunity.
new_description: str. The new skill_description.
"""
skill_opportunity = _get_skill_opportunity(skill_id)
if skill_opportunity is not None:
skill_opportunity.skill_description = new_description
_save_skill_opportunities([skill_opportunity])
def _get_skill_opportunity(skill_id):
"""Returns the SkillOpportunity domain object representing a
SkillOpportunityModel with the supplied skill_id in the datastore.
Args:
skill_id: str. The corresponding skill_id of the opportunity.
Returns:
SkillOpportunity or None. The domain object representing a
SkillOpportunity with the supplied skill_id, or None if it does not
exist.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
return get_skill_opportunity_from_model(skill_opportunity_model)
return None
def delete_skill_opportunity(skill_id):
"""Deletes the SkillOpportunityModel corresponding to the supplied skill_id.
Args:
skill_id: str. The skill_id corresponding to the to-be-deleted
SkillOpportunityModel.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
opportunity_models.SkillOpportunityModel.delete(skill_opportunity_model)
def increment_question_counts(skill_ids, delta):
"""Increments question_count(s) of SkillOpportunityModel(s) with
corresponding skill_ids.
Args:
skill_ids: list(str). A list of skill_ids corresponding to
SkillOpportunityModel(s).
delta: int. The delta for which to increment each question_count.
"""
updated_skill_opportunities = (
_get_skill_opportunities_with_updated_question_counts(skill_ids, delta))
_save_skill_opportunities(updated_skill_opportunities)
def update_skill_opportunities_on_question_linked_skills_change(
old_skill_ids, new_skill_ids):
"""Updates question_count(s) of SkillOpportunityModel(s) corresponding to
the change in linked skill IDs for a question from old_skill_ids to
new_skill_ids, e.g. if skill_id1 is in old_skill_ids, but not in
new_skill_ids, the question_count of the SkillOpportunityModel for skill_id1
would be decremented.
NOTE: Since this method updates the question_counts based on the change
from old_skill_ids to new_skill_ids, both lists must describe the same
question's linked skills (before and after the change).
Args:
old_skill_ids: list(str). A list of old skill_id(s).
new_skill_ids: list(str). A list of new skill_id(s).
"""
old_skill_ids_set = set(old_skill_ids)
new_skill_ids_set = set(new_skill_ids)
new_skill_ids_added_to_question = new_skill_ids_set - old_skill_ids_set
skill_ids_removed_from_question = old_skill_ids_set - new_skill_ids_set
updated_skill_opportunities = []
updated_skill_opportunities.extend(
_get_skill_opportunities_with_updated_question_counts(
new_skill_ids_added_to_question, 1))
updated_skill_opportunities.extend(
_get_skill_opportunities_with_updated_question_counts(
skill_ids_removed_from_question, -1))
_save_skill_opportunities(updated_skill_opportunities)
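# Worked example (illustrative only): if a question's linked skills change
# from ['skill_a', 'skill_b'] to ['skill_b', 'skill_c'], then
#   new_skill_ids_added_to_question  == {'skill_c'}  -> question_count += 1
#   skill_ids_removed_from_question  == {'skill_a'}  -> question_count -= 1
# and only the opportunities for skill_a and skill_c are re-saved; skill_b
# appears in both lists, so its opportunity is left untouched.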
def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta):
"""Returns a list of SkillOpportunities with corresponding skill_ids
with question_count(s) updated by delta.
Args:
skill_ids: iterable(str). The IDs of the matching SkillOpportunityModels
in the datastore.
delta: int. The delta by which to update each question_count (can be
negative).
Returns:
list(SkillOpportunity). The updated SkillOpportunities.
"""
updated_skill_opportunities = []
skill_opportunity_models = (
opportunity_models.SkillOpportunityModel.get_multi(skill_ids))
for skill_opportunity_model in skill_opportunity_models:
if skill_opportunity_model is not None:
skill_opportunity = get_skill_opportunity_from_model(
skill_opportunity_model)
skill_opportunity.question_count += delta
updated_skill_opportunities.append(skill_opportunity)
return updated_skill_opportunities
def regenerate_opportunities_related_to_topic(
topic_id, delete_existing_opportunities=False):
"""Regenerates opportunity models which belongs to a given topic.
Args:
topic_id: str. The ID of the topic.
delete_existing_opportunities: bool. Whether to delete all the existing
opportunities related to the given topic.
Returns:
int. The number of opportunity models created.
Raises:
Exception. Failure to regenerate opportunities for given topic.
"""
if delete_existing_opportunities:
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models)
topic = topic_fetchers.get_topic_by_id(topic_id)
story_ids = topic.get_canonical_story_ids()
stories = story_fetchers.get_stories_by_ids(story_ids)
exp_ids = []
non_existing_story_ids = []
for index, story in enumerate(stories):
if story is None:
non_existing_story_ids.append(story_ids[index])
else:
exp_ids += story.story_contents.get_all_linked_exp_ids()
exp_ids_to_exp = exp_fetchers.get_multiple_explorations_by_id(
exp_ids, strict=False)
non_existing_exp_ids = set(exp_ids) - set(exp_ids_to_exp.keys())
if len(non_existing_exp_ids) > 0 or len(non_existing_story_ids) > 0:
raise Exception(
'Failed to regenerate opportunities for topic id: %s, '
'missing_exp_with_ids: %s, missing_story_with_ids: %s' % (
topic_id, list(non_existing_exp_ids), non_existing_story_ids))
exploration_opportunity_summary_list = []
for story in stories:
for exp_id in story.story_contents.get_all_linked_exp_ids():
exploration_opportunity_summary_list.append(
create_exp_opportunity_summary(
topic, story, exp_ids_to_exp[exp_id]))
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
return len(exploration_opportunity_summary_list)
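# Minimal usage sketch (assumed, not part of the original services module;
# 'topic_id_1' is a placeholder):
#
#   # Rebuild every exploration opportunity for a topic from scratch,
#   # discarding whatever opportunity models currently exist for it.
#   created_count = regenerate_opportunities_related_to_topic(
#       'topic_id_1', delete_existing_opportunities=True)
#
# The call raises an Exception if any story or exploration referenced by the
# topic's canonical stories is missing.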
|
|
"""Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import trollius as asyncio
from trollius import log
from trollius import test_utils
from trollius import unix_events
from trollius.py33_exceptions import BlockingIOError, ChildProcessError
from trollius.test_utils import mock
MOCK_ANY = mock.ANY
def close_pipe_transport(transport):
# Don't call transport.close() because the event loop and the selector
# are mocked
if transport._pipe is None:
return
transport._pipe.close()
transport._pipe = None
@test_utils.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
def test_check_signal(self):
self.assertRaises(
TypeError, self.loop._check_signal, '1')
self.assertRaises(
ValueError, self.loop._check_signal, signal.NSIG + 1)
def test_handle_signal_no_handler(self):
self.loop._handle_signal(signal.NSIG + 1)
def test_handle_signal_cancelled_handler(self):
h = asyncio.Handle(mock.Mock(), (),
loop=mock.Mock())
h.cancel()
self.loop._signal_handlers[signal.NSIG + 1] = h
self.loop.remove_signal_handler = mock.Mock()
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
@mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.set_wakeup_fd.side_effect = ValueError
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
@asyncio.coroutine
def simple_coroutine():
yield None
# callback must not be a coroutine function
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
self.assertRaisesRegex(
TypeError, 'coroutines cannot be used with add_signal_handler',
self.loop.add_signal_handler,
signal.SIGINT, func)
@mock.patch('trollius.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
cb = lambda: True
self.loop.add_signal_handler(signal.SIGHUP, cb)
h = self.loop._signal_handlers.get(signal.SIGHUP)
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
@mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
def set_wakeup_fd(fd):
if fd == -1:
raise ValueError()
m_signal.set_wakeup_fd = set_wakeup_fd
class Err(OSError):
errno = errno.EFAULT
m_signal.signal.side_effect = Err
self.assertRaises(
Err,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('trollius.unix_events.signal')
@mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.loop._signal_handlers[signal.SIGHUP] = lambda: True
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
@mock.patch('trollius.unix_events.signal')
@mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
m_signal.NSIG = signal.NSIG
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
@mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGHUP))
self.assertTrue(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
@mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
self.loop.add_signal_handler(signal.SIGINT, lambda: True)
self.loop._signal_handlers[signal.SIGHUP] = object()
m_signal.set_wakeup_fd.reset_mock()
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGINT))
self.assertFalse(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
@mock.patch('trollius.unix_events.signal')
@mock.patch('trollius.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.set_wakeup_fd.side_effect = ValueError
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
@mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.signal.side_effect = OSError
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('trollius.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 2)
m_signal.set_wakeup_fd.reset_mock()
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
m_signal.set_wakeup_fd.assert_called_once_with(-1)
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Socket was expected'):
self.loop.run_until_complete(coro)
@mock.patch('trollius.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
m_socket.error = socket.error
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
sock.bind.side_effect = MemoryError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(MemoryError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', sock=object())
with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nopath_nosock(self):
coro = self.loop.create_unix_connection(
lambda: None, None)
with self.assertRaisesRegex(ValueError,
'no path and sock were specified'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nossl_serverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', server_hostname='spam')
with self.assertRaisesRegex(ValueError,
'server_hostname is only meaningful'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_noserverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', ssl=True)
with self.assertRaisesRegex(
ValueError, 'you have to pass server_hostname when using ssl'):
self.loop.run_until_complete(coro)
class UnixReadPipeTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFIFO
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def read_pipe_transport(self, waiter=None):
transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
tr = self.read_pipe_transport()
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.protocol.connection_made.assert_called_with(tr)
def test_ctor_with_waiter(self):
fut = asyncio.Future(loop=self.loop)
tr = self.read_pipe_transport(waiter=fut)
test_utils.run_briefly(self.loop)
self.assertIsNone(fut.result())
@mock.patch('os.read')
def test__read_ready(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b'data'
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.protocol.data_received.assert_called_with(b'data')
@mock.patch('os.read')
def test__read_ready_eof(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b''
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.eof_received.assert_called_with()
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.read')
def test__read_ready_blocked(self, m_read):
tr = self.read_pipe_transport()
m_read.side_effect = BlockingIOError
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
@mock.patch('trollius.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = self.read_pipe_transport()
err = OSError()
m_read.side_effect = err
tr._close = mock.Mock()
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
tr._close.assert_called_with(err)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal read error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
@mock.patch('os.read')
def test_pause_reading(self, m_read):
tr = self.read_pipe_transport()
m = mock.Mock()
self.loop.add_reader(5, m)
tr.pause_reading()
self.assertFalse(self.loop.readers)
@mock.patch('os.read')
def test_resume_reading(self, m_read):
tr = self.read_pipe_transport()
tr.resume_reading()
self.loop.assert_reader(5, tr._read_ready)
@mock.patch('os.read')
def test_close(self, m_read):
tr = self.read_pipe_transport()
tr._close = mock.Mock()
tr.close()
tr._close.assert_called_with(None)
@mock.patch('os.read')
def test_close_already_closing(self, m_read):
tr = self.read_pipe_transport()
tr._closing = True
tr._close = mock.Mock()
tr.close()
self.assertFalse(tr._close.called)
@mock.patch('os.read')
def test__close(self, m_read):
tr = self.read_pipe_transport()
err = object()
tr._close(err)
self.assertTrue(tr._closing)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
def test__call_connection_lost(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
class UnixWritePipeTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFSOCK
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def write_pipe_transport(self, waiter=None):
transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
tr = self.write_pipe_transport()
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.protocol.connection_made.assert_called_with(tr)
def test_ctor_with_waiter(self):
fut = asyncio.Future(loop=self.loop)
tr = self.write_pipe_transport(waiter=fut)
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.assertEqual(None, fut.result())
def test_can_write_eof(self):
tr = self.write_pipe_transport()
self.assertTrue(tr.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 4
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
tr = self.write_pipe_transport()
tr.write(b'')
self.assertFalse(m_write.called)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 2
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'ta'], tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'previous']
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'previous', b'data'], tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = self.write_pipe_transport()
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('trollius.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
# This is a bit overspecified. :-(
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
tr.close()
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = self.write_pipe_transport()
tr._read_ready() # pipe was closed by peer
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = self.write_pipe_transport()
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr._closing)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 4
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 3
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'a'], tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('trollius.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.side_effect = err = OSError()
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr._closing)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal write error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = [b'da', b'ta']
m_write.return_value = 4
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual([], tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr._closing)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_close(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
# closing the transport twice must not fail
tr.close()
def test_close_closing(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = self.write_pipe_transport()
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = self.write_pipe_transport()
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = asyncio.AbstractChildWatcher()
self.assertRaises(
NotImplementedError, watcher.add_child_handler, f, f)
self.assertRaises(
NotImplementedError, watcher.remove_child_handler, f)
self.assertRaises(
NotImplementedError, watcher.attach_loop, f)
self.assertRaises(
NotImplementedError, watcher.close)
self.assertRaises(
NotImplementedError, watcher.__enter__)
self.assertRaises(
NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = unix_events.BaseChildWatcher()
self.assertRaises(
NotImplementedError, watcher._do_waitpid, f)
WaitPidMocks = collections.namedtuple("WaitPidMocks",
("waitpid",
"WIFEXITED",
"WIFSIGNALED",
"WEXITSTATUS",
"WTERMSIG",
))
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
self.loop = self.new_test_loop()
self.running = False
self.zombies = {}
with mock.patch.object(
self.loop, "add_signal_handler") as self.m_add_signal_handler:
self.watcher = self.create_watcher()
self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
self.assertGreater(pid, 0)
try:
if pid < 0:
return self.zombies.popitem()
else:
return pid, self.zombies.pop(pid)
except KeyError:
pass
if self.running:
return 0, 0
else:
raise ChildProcessError()
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
def WEXITSTATUS(self, status):
self.assertTrue(self.WIFEXITED(status))
return status - 32768
def WTERMSIG(self, status):
self.assertTrue(self.WIFSIGNALED(status))
return 32768 - status
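# How the fake status encoding above fits together (clarifying comment; the
# offsets mirror the helpers in this mixin, not any real waitpid ABI):
#   add_zombie(42, 12) stores status 32780 -> WIFEXITED is True and
#       WEXITSTATUS recovers 32780 - 32768 == 12 (normal exit).
#   add_zombie(43, -3) stores status 32765 -> WIFSIGNALED is True and
#       WTERMSIG recovers 32768 - 32765 == 3 (killed by signal 3).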
def test_create_watcher(self):
self.m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
def waitpid_mocks(func):
def wrapped_func(self):
exit_stack = []
def patch(target, wrapper):
m = mock.patch(target, wraps=wrapper)
exit_stack.append(m)
return m.__enter__()
m_waitpid = patch('os.waitpid', self.waitpid)
m_WIFEXITED = patch('os.WIFEXITED', self.WIFEXITED)
m_WIFSIGNALED = patch('os.WIFSIGNALED', self.WIFSIGNALED)
m_WEXITSTATUS = patch('os.WEXITSTATUS', self.WEXITSTATUS)
m_WTERMSIG = patch('os.WTERMSIG', self.WTERMSIG)
try:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
finally:
for obj in reversed(exit_stack):
obj.__exit__(None, None, None)
return wrapped_func
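# Note (added for clarity): waitpid_mocks wraps a test method so that
# os.waitpid and the os.WIFEXITED/WIFSIGNALED/WEXITSTATUS/WTERMSIG helpers are
# patched with the fake implementations defined above, and passes the
# resulting mocks to the test as a WaitPidMocks namedtuple (the 'm' argument
# of each test below).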
@waitpid_mocks
def test_sigchld(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(42, callback, 9, 10, 14)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child is running
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (returncode 12)
self.running = False
self.add_zombie(42, 12)
self.watcher._sig_chld()
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.assert_called_once_with(42, 12, 9, 10, 14)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(42, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(43, callback1, 7, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(44, callback2, 147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (signal 3)
self.add_zombie(43, -3)
self.watcher._sig_chld()
callback1.assert_called_once_with(43, -3, 7, 8)
self.assertFalse(callback2.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback1.reset_mock()
# child 2 still running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 2 terminates (code 108)
self.add_zombie(44, 108)
self.running = False
self.watcher._sig_chld()
callback2.assert_called_once_with(44, 108, 147, 18)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(43, 14)
self.add_zombie(44, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(45, callback1, 17, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(46, callback2, 1147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (code 78)
# child 2 terminates (signal 5)
self.add_zombie(45, 78)
self.add_zombie(46, -5)
self.running = False
self.watcher._sig_chld()
callback1.assert_called_once_with(45, 78, 17, 8)
callback2.assert_called_once_with(46, -5, 1147, 18)
self.assertTrue(m.WIFSIGNALED.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
m.WEXITSTATUS.reset_mock()
callback1.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(45, 14)
self.add_zombie(46, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
# child terminates before being registered
self.add_zombie(50, 4)
self.watcher._sig_chld()
self.watcher.add_child_handler(50, callback, 1, 12)
callback.assert_called_once_with(50, 4, 1, 12)
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(50, -1)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(51, callback1, 19)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register the same child again
with self.watcher:
self.watcher.add_child_handler(51, callback2, 21)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (signal 8)
self.running = False
self.add_zombie(51, -8)
self.watcher._sig_chld()
callback2.assert_called_once_with(51, -8, 21)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback2.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(51, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(52, callback, 1984)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# unregister the child
self.watcher.remove_child_handler(52)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (code 99)
self.running = False
self.add_zombie(52, 99)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(53, callback, -19)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# terminate with unknown status
self.zombies[53] = 1178
self.running = False
self.watcher._sig_chld()
callback.assert_called_once_with(53, 1178, -19)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.reset_mock()
m.WIFEXITED.reset_mock()
m.WIFSIGNALED.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(53, 101)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
# register children
with self.watcher:
self.running = True
self.watcher.add_child_handler(54, callback1, 1)
self.watcher.add_child_handler(55, callback2, 2)
self.watcher.add_child_handler(56, callback3, 3)
# remove child handler 1
self.assertTrue(self.watcher.remove_child_handler(54))
# remove child handler 2 multiple times
self.assertTrue(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
# all children terminate
self.add_zombie(54, 0)
self.add_zombie(55, 1)
self.add_zombie(56, 2)
self.running = False
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(57, callback)
# raise an exception
m.waitpid.side_effect = ValueError
with mock.patch.object(log.logger,
'error') as m_error:
self.assertEqual(self.watcher._sig_chld(), None)
self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(58, callback)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates
self.running = False
self.add_zombie(58, 4)
# waitpid is called elsewhere
os.waitpid(58, os.WNOHANG)
m.waitpid.reset_mock()
# sigchld
with self.ignore_warnings:
self.watcher._sig_chld()
if isinstance(self.watcher, asyncio.FastChildWatcher):
# here the FastChildWatcher enters a deadlock
# (there is no way to prevent it)
self.assertFalse(callback.called)
else:
callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
# register two children
callback1 = mock.Mock()
callback2 = mock.Mock()
with self.ignore_warnings:
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(591, 7)
# an unknown child terminates
self.add_zombie(593, 17)
self.watcher._sig_chld()
self.watcher.add_child_handler(591, callback1)
self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(60, callback)
# attach a new loop
old_loop = self.loop
self.loop = self.new_test_loop()
patch = mock.patch.object
with patch(old_loop, "remove_signal_handler") as m_old_remove:
with patch(self.loop, "add_signal_handler") as m_new_add:
self.watcher.attach_loop(self.loop)
m_old_remove.assert_called_once_with(
signal.SIGCHLD)
m_new_add.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
self.add_zombie(60, 9)
self.watcher._sig_chld()
callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
# register 3 children
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(61, callback1)
self.watcher.add_child_handler(62, callback2)
self.watcher.add_child_handler(622, callback3)
# detach the loop
old_loop = self.loop
self.loop = None
with mock.patch.object(
old_loop, "remove_signal_handler") as m_remove_signal_handler:
self.watcher.attach_loop(None)
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
# child 1 & 2 terminate
self.add_zombie(61, 11)
self.add_zombie(62, -5)
# SIGCHLD was not caught
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(callback3.called)
# attach a new loop
self.loop = self.new_test_loop()
with mock.patch.object(
self.loop, "add_signal_handler") as m_add_signal_handler:
self.watcher.attach_loop(self.loop)
m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
callback1.assert_called_once_with(61, 11) # race condition!
callback2.assert_called_once_with(62, -5) # race condition!
self.assertFalse(callback3.called)
callback1.reset_mock()
callback2.reset_mock()
# child 3 terminates
self.running = False
self.add_zombie(622, 19)
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
# register two children
callback1 = mock.Mock()
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(63, 9)
# other child terminates
self.add_zombie(65, 18)
self.watcher._sig_chld()
self.watcher.add_child_handler(63, callback1)
self.watcher.add_child_handler(64, callback1)
self.assertEqual(len(self.watcher._callbacks), 1)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertEqual(len(self.watcher._zombies), 1)
with mock.patch.object(
self.loop,
"remove_signal_handler") as m_remove_signal_handler:
self.watcher.close()
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
self.assertFalse(self.watcher._callbacks)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.SafeChildWatcher()
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.FastChildWatcher()
class PolicyTests(test_utils.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
def test_get_child_watcher(self):
policy = self.create_policy()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
self.assertIsNone(watcher._loop)
def test_get_child_watcher_after_set(self):
policy = self.create_policy()
watcher = asyncio.FastChildWatcher()
policy.set_child_watcher(watcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_with_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(watcher._loop, loop)
loop.close()
def test_get_child_watcher_thread(self):
def f():
policy.set_event_loop(policy.new_event_loop())
self.assertIsInstance(policy.get_event_loop(),
asyncio.AbstractEventLoop)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIsNone(watcher._loop)
policy.get_event_loop().close()
policy = self.create_policy()
th = threading.Thread(target=f)
th.start()
th.join()
def test_child_watcher_replace_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
watcher = policy.get_child_watcher()
self.assertIs(watcher._loop, loop)
new_loop = policy.new_event_loop()
policy.set_event_loop(new_loop)
self.assertIs(watcher._loop, new_loop)
policy.set_event_loop(None)
self.assertIs(watcher._loop, None)
loop.close()
new_loop.close()
if __name__ == '__main__':
unittest.main()
|
|
import numpy as np
from .Transform import *
class TransformObject:
def __init__(self, local=None):
self.quat = Float4(0.0, 0.0, 0.0, 1.0)
self.local = local if local is not None else Matrix4()
self.updated = True
self.left = WORLD_LEFT.copy()
self.up = WORLD_UP.copy()
self.front = WORLD_FRONT.copy()
self.pos = Float3()
self.rot = Float3()
self.scale = Float3(1, 1, 1)
self.prev_Pos = Float3()
self.prev_Rot = Float3()
self.prev_Scale = Float3(1, 1, 1)
self.rotationMatrix = Matrix4()
self.matrix = Matrix4()
self.inverse_matrix = Matrix4()
self.prev_matrix = Matrix4()
self.prev_inverse_matrix = Matrix4()
self.update_transform(True)
def reset_transform(self):
self.updated = True
self.set_pos(Float3())
self.set_rotation(Float3())
self.set_scale(Float3(1, 1, 1))
self.update_transform(True)
# Translate
def get_pos(self):
return self.pos
def get_pos_x(self):
return self.pos[0]
def get_pos_y(self):
return self.pos[1]
def get_pos_z(self):
return self.pos[2]
def set_pos(self, pos):
self.pos[...] = pos
def set_pos_x(self, x):
self.pos[0] = x
def set_pos_y(self, y):
self.pos[1] = y
def set_pos_z(self, z):
self.pos[2] = z
def move(self, pos):
self.pos[...] = self.pos + pos
def move_front(self, pos):
self.pos[...] = self.pos + self.front * pos
def move_left(self, pos):
self.pos[...] = self.pos + self.left * pos
def move_up(self, pos):
self.pos[...] = self.pos + self.up * pos
def move_x(self, pos_x):
self.pos[0] += pos_x
def move_y(self, pos_y):
self.pos[1] += pos_y
def move_z(self, pos_z):
self.pos[2] += pos_z
# Rotation
def get_rotation(self):
return self.rot
def get_pitch(self):
return self.rot[0]
def get_yaw(self):
return self.rot[1]
def get_roll(self):
return self.rot[2]
def set_rotation(self, rot):
self.rot[...] = rot
def set_pitch(self, pitch):
if pitch > TWO_PI or pitch < 0.0:
pitch %= TWO_PI
self.rot[0] = pitch
def set_yaw(self, yaw):
if yaw > TWO_PI or yaw < 0.0:
yaw %= TWO_PI
self.rot[1] = yaw
def set_roll(self, roll):
if roll > TWO_PI or roll < 0.0:
roll %= TWO_PI
self.rot[2] = roll
def rotation(self, rot):
self.rotation_pitch(rot[0])
self.rotation_yaw(rot[1])
self.rotation_roll(rot[2])
def rotation_pitch(self, delta=0.0):
self.rot[0] += delta
if self.rot[0] > TWO_PI or self.rot[0] < 0.0:
self.rot[0] %= TWO_PI
def rotation_yaw(self, delta=0.0):
self.rot[1] += delta
if self.rot[1] > TWO_PI or self.rot[1] < 0.0:
self.rot[1] %= TWO_PI
def rotation_roll(self, delta=0.0):
self.rot[2] += delta
if self.rot[2] > TWO_PI or self.rot[2] < 0.0:
self.rot[2] %= TWO_PI
# Scale
def get_scale(self):
return self.scale
def get_scale_x(self):
return self.scale[0]
def get_scale_Y(self):
return self.scale[1]
def get_scale_z(self):
return self.scale[2]
def set_scale(self, scale):
self.scale[...] = scale
def set_scale_x(self, x):
self.scale[0] = x
def set_scale_y(self, y):
self.scale[1] = y
def set_scale_z(self, z):
self.scale[2] = z
def scaling(self, scale):
self.scale[...] = self.scale + scale
# update Transform
def update_transform(self, update_inverse_matrix=False, force_update=False):
prev_updated = self.updated
self.updated = False
if any(self.prev_Pos != self.pos) or force_update:
self.prev_Pos[...] = self.pos
self.updated = True
if any(self.prev_Rot != self.rot) or force_update:
self.prev_Rot[...] = self.rot
self.updated = True
# Matrix Rotation - faster
matrix_rotation(self.rotationMatrix, *self.rot)
matrix_to_vectors(self.rotationMatrix, self.left, self.up, self.front)
# Euler Rotation - slow
# p = get_rotation_matrix_x(self.rot[0])
# y = get_rotation_matrix_y(self.rot[1])
# r = get_rotation_matrix_z(self.rot[2])
# self.rotationMatrix = np.dot(p, np.dot(y, r))
# matrix_to_vectors(self.rotationMatrix, self.right, self.up, self.front)
# Quaternion Rotation - slower
# euler_to_quaternion(*self.rot, self.quat)
# quaternion_to_matrix(self.quat, self.rotationMatrix)
# matrix_to_vectors(self.rotationMatrix, self.right, self.up, self.front)
if any(self.prev_Scale != self.scale) or force_update:
self.prev_Scale[...] = self.scale
self.updated = True
if prev_updated or self.updated:
self.prev_matrix[...] = self.matrix
if update_inverse_matrix:
self.prev_inverse_matrix[...] = self.inverse_matrix
if self.updated:
self.matrix[...] = self.local
transform_matrix(self.matrix, self.pos, self.rotationMatrix, self.scale)
if update_inverse_matrix:
# self.inverse_matrix[...] = np.linalg.inv(self.matrix)
self.inverse_matrix[...] = self.local
inverse_transform_matrix(self.inverse_matrix, self.pos, self.rotationMatrix, self.scale)
return self.updated
def get_transform_infos(self):
text = "\tPosition : " + " ".join(["%2.2f" % i for i in self.pos])
text += "\n\tRotation : " + " ".join(["%2.2f" % i for i in self.rot])
text += "\n\tFront : " + " ".join(["%2.2f" % i for i in self.front])
text += "\n\tLeft : " + " ".join(["%2.2f" % i for i in self.left])
text += "\n\tUp : " + " ".join(["%2.2f" % i for i in self.up])
text += "\n\tMatrix"
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[0, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[1, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[2, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[3, :]])
return text
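# Minimal usage sketch (illustrative only; Float3 comes from the Transform
# module imported above):
#
#   obj = TransformObject()
#   obj.set_pos(Float3(1.0, 2.0, 3.0))
#   obj.rotation_yaw(0.5)                # radians, kept within [0, TWO_PI)
#   obj.scaling(Float3(1.0, 0.0, 0.0))   # adds to the current scale
#   if obj.update_transform(update_inverse_matrix=True):
#       model_matrix = obj.matrix        # world matrix for this frame
#
# update_transform() returns True only when position, rotation or scale
# actually changed since the previous call (or when force_update is passed).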
|
|
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.db.models import Extent3D, Union
from django.contrib.gis.db.models.functions import (
AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate,
)
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from .models import (
City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D,
MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D,
)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its coordinate tuple (the Z value is the
# altitude).
city_dict = {name: coords for name, coords in city_data}
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DLoadingHelper(object):
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_storage")
class Geo3DTest(Geo3DLoadingHelper, TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
with self.assertRaises(LayerMapError):
LayerMapping(Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
@ignore_warnings(category=RemovedInDjango20Warning)
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
@skipUnlessDBFeature("supports_3d_functions")
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = (
'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
'-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
'-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
)
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
# Ordering of points in the resulting geometry may vary between implementations
self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})
@skipUnlessDBFeature("supports_3d_functions")
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
check_extent3d(extent)
self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
        # a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
        # Making sure `ST_Length3D` is used for a projected
        # and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
        # Z scale factors to apply; the reference Z values come from city_dict.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnlessDBFeature("supports_3d_functions")
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase):
def test_kml(self):
"""
Test KML() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoJSON() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_perimeter(self):
"""
Testing Perimeter() function on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox')
self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox')
self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)
def test_length(self):
"""
Testing Length() function on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
        # a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
        # Making sure `ST_Length3D` is used for a projected
        # and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45')
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
def test_scale(self):
"""
Testing Scale() function on Z values.
"""
self._load_city_data()
        # Z scale factors to apply; the reference Z values come from city_dict.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing Translate() function on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
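# Hedged reference sketch (not part of the original test suite): the
# annotate() pattern exercised by Geo3DFunctionsTests, pulled out of a test
# for readability. The helper name `_example_annotate_3d_length` is ours; the
# model and function names match those imported above, and running it requires
# a configured GeoDjango database just like the tests themselves.
def _example_annotate_3d_length():
    # Annotate each 3D interstate with its spheroidal length and return a
    # mapping of name -> length in meters.
    qs = Interstate3D.objects.annotate(length=Length('line'))
    return {row.name: row.length.m for row in qs}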
|
|
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# Trent Mick ([email protected])
# Home:
# http://trentm.com/projects/which/
r"""Find the full path to commands.
which(command, path=None, verbose=0, exts=None)
Return the full path to the first match of the given command on the
path.
whichall(command, path=None, verbose=0, exts=None)
Return a list of full paths to all matches of the given command on
the path.
whichgen(command, path=None, verbose=0, exts=None)
Return a generator which will yield full paths to all matches of the
given command on the path.
By default the PATH environment variable is searched (as well as, on
Windows, the AppPaths key in the registry), but a specific 'path' list
to search may be specified as well. On Windows, the PATHEXT environment
variable is applied as appropriate.
If "verbose" is true then a tuple of the form
(<fullpath>, <matched-where-description>)
is returned for each match. The latter element is a textual description
of where the match was found. For example:
from PATH element 0
from HKLM\SOFTWARE\...\perl.exe
"""
from __future__ import print_function
_cmdlnUsage = """
Show the full path of commands.
Usage:
which [<options>...] [<command-name>...]
Options:
-h, --help Print this help and exit.
-V, --version Print the version info and exit.
-a, --all Print *all* matching paths.
-v, --verbose Print out how matches were located and
show near misses on stderr.
-q, --quiet Just print out matches. I.e., do not print out
near misses.
-p <altpath>, --path=<altpath>
An alternative path (list of directories) may
be specified for searching.
-e <exts>, --exts=<exts>
Specify a list of extensions to consider instead
of the usual list (';'-separate list, Windows
only).
Show the full path to the program that would be run for each given
command name, if any. Which, like GNU's which, returns the number of
failed arguments, or -1 when no <command-name> was given.
Near misses include duplicates, non-regular files and (on Un*x)
files without executable access.
"""
__revision__ = "$Id: which.py 430 2005-08-20 03:11:58Z trentm $"
__version_info__ = (1, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import getopt
import stat
#---- exceptions
class WhichError(Exception):
pass
#---- internal support stuff
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered
def _samefile(fname1, fname2):
if sys.platform.startswith('win'):
return ( os.path.normpath(os.path.normcase(fname1)) ==\
os.path.normpath(os.path.normcase(fname2)) )
else:
return os.path.samefile(fname1, fname2)
def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write("duplicate: %s (%s)\n" % potential)
return None
else:
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write("not a regular file: %s (%s)\n" % potential)
elif not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write("no executable access: %s (%s)\n"\
% potential)
else:
matches.append(potential)
return potential
#---- module API
def whichgen(command, path=None, verbose=0, exts=None):
"""Return a generator of full paths to the given command.
"command" is the name of the executable to search for.
"path" is an optional alternate path list to search. The default is
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>).
"""
matches = []
if path is None:
usingGivenPath = 0
path = os.environ.get("PATH", "").split(os.pathsep)
if sys.platform.startswith("win"):
path.insert(0, os.curdir) # implied by Windows shell
else:
usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
if exts is None:
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
            # If '.exe' is not in exts then this is probably Win9x or a
            # bogus PATHEXT, so use a reasonable default.
for ext in exts:
if ext.lower() == ".exe":
break
else:
exts = ['.COM', '.EXE', '.BAT']
elif not isinstance(exts, list):
raise TypeError("'exts' argument must be a list or None")
else:
if exts is not None:
raise WhichError("'exts' argument is not supported on "\
"platform '%s'" % sys.platform)
exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or os.altsep and os.altsep in command:
pass
else:
for i in range(len(path)):
dirName = path[i]
            # On Windows the dirName *could* be quoted, drop the quotes
if sys.platform.startswith("win") and len(dirName) >= 2\
and dirName[0] == '"' and dirName[-1] == '"':
dirName = dirName[1:-1]
for ext in ['']+exts:
absName = os.path.abspath(
os.path.normpath(os.path.join(dirName, command+ext)))
if os.path.isfile(absName):
if usingGivenPath:
fromWhere = "from given path element %d" % i
elif not sys.platform.startswith("win"):
fromWhere = "from PATH element %d" % i
elif i == 0:
fromWhere = "from current directory"
else:
fromWhere = "from PATH element %d" % (i-1)
match = _cull((absName, fromWhere), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
match = _getRegisteredExecutable(command)
if match is not None:
match = _cull(match, matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
def which(command, path=None, verbose=0, exts=None):
"""Return the full path to the first match of the given command on
the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned. The second
element is a textual description of where the match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
If no match is found for the command, a WhichError is raised.
"""
try:
match = next(whichgen(command, path, verbose, exts))
except StopIteration:
raise WhichError("Could not find '%s' on the path." % command)
return match
def whichall(command, path=None, verbose=0, exts=None):
"""Return a list of full paths to all matches of the given command
on the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
"""
return list( whichgen(command, path, verbose, exts) )
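# Hedged usage sketch (not part of the original module): how the public API is
# typically called. The command name "python" and the wrapper function name
# `_example_which_usage` are illustrative only.
def _example_which_usage():
    try:
        first = which("python")                       # first match on the path
        every = whichall("python")                    # list of all matches
        where = list(whichgen("python", verbose=1))   # (path, description) tuples
    except WhichError:
        first, every, where = None, [], []
    return first, every, where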
#---- mainline
def main(argv):
all = 0
verbose = 0
altpath = None
exts = None
try:
optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:',
['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts='])
except getopt.GetoptError as msg:
sys.stderr.write("which: error: %s. Your invocation was: %s\n"\
% (msg, argv))
sys.stderr.write("Try 'which --help'.\n")
return 1
for opt, optarg in optlist:
if opt in ('-h', '--help'):
print(_cmdlnUsage)
return 0
elif opt in ('-V', '--version'):
print("which %s" % __version__)
return 0
elif opt in ('-a', '--all'):
all = 1
elif opt in ('-v', '--verbose'):
verbose = 1
elif opt in ('-q', '--quiet'):
verbose = 0
elif opt in ('-p', '--path'):
if optarg:
altpath = optarg.split(os.pathsep)
else:
altpath = []
elif opt in ('-e', '--exts'):
if optarg:
exts = optarg.split(os.pathsep)
else:
exts = []
if len(args) == 0:
return -1
failures = 0
for arg in args:
#print("debug: search for %r" % arg)
nmatches = 0
for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts):
if verbose:
print("%s (%s)" % match)
else:
print(match)
nmatches += 1
if not all:
break
if not nmatches:
failures += 1
return failures
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
|
import numpy as np
import copy
from .base import Visuals
from . import color
from .. import util
from .. import caching
from .. import grouping
from .material import SimpleMaterial, PBRMaterial, empty_material # NOQA
class TextureVisuals(Visuals):
def __init__(self,
uv=None,
material=None,
image=None):
"""
Store a single material and per-vertex UV coordinates
for a mesh.
If passed UV coordinates and a single image it will
create a SimpleMaterial for the image.
Parameters
--------------
uv : (n, 2) float
UV coordinates for the mesh
material : Material
Store images and properties
image : PIL.Image
Can be passed to automatically create material
"""
# store values we care about enough to hash
self._data = caching.DataStore()
# cache calculated values
self._cache = caching.Cache(self._data.fast_hash)
# should be (n, 2) float
self.uv = uv
if material is None:
if image is None:
self.material = empty_material()
else:
# if an image is passed create a SimpleMaterial
self.material = SimpleMaterial(image=image)
else:
# if passed assign
self.material = material
def _verify_crc(self):
"""
Dump the cache if anything in self._data has changed.
"""
self._cache.verify()
@property
def kind(self):
"""
Return the type of visual data stored
Returns
----------
kind : str
What type of visuals are defined
"""
return 'texture'
@property
def defined(self):
"""
Check if any data is stored
Returns
----------
defined : bool
          Is a material set?
"""
ok = self.material is not None
return ok
def crc(self):
"""
Get a CRC of the stored data.
Returns
--------------
crc : int
Hash of items in self._data
"""
return self._data.crc()
@property
def uv(self):
"""
Get the stored UV coordinates.
Returns
------------
uv : (n, 2) float
          Pixel position per vertex
"""
if 'uv' in self._data:
return self._data['uv']
return None
@uv.setter
def uv(self, values):
"""
Set the UV coordinates.
Parameters
--------------
values : (n, 2) float
          Pixel locations on a texture per vertex
"""
if values is None:
self._data.clear()
else:
self._data['uv'] = np.asanyarray(
values, dtype=np.float64)
def copy(self):
"""
Return a copy of the current TextureVisuals object.
Returns
----------
copied : TextureVisuals
Contains the same information in a new object
"""
uv = self.uv
if uv is not None:
uv = uv.copy()
copied = TextureVisuals(
uv=uv,
material=copy.deepcopy(self.material))
return copied
def to_color(self):
"""
Convert textured visuals to a ColorVisuals with vertex
color calculated from texture.
Returns
-----------
vis : trimesh.visuals.ColorVisuals
Contains vertex color from texture
"""
# find the color at each UV coordinate
colors = self.material.to_color(self.uv)
# create ColorVisuals from result
vis = color.ColorVisuals(vertex_colors=colors)
return vis
    def face_subset(self, face_index):
        """
        Get a copy of the visuals for a subset of faces.

        Texture visuals are currently returned unchanged rather
        than being subset by `face_index`.
        """
        return self.copy()
def update_vertices(self, mask):
"""
Apply a mask to remove or duplicate vertex properties.
"""
if self.uv is not None:
self.uv = self.uv[mask]
def update_faces(self, mask):
"""
Apply a mask to remove or duplicate face properties
"""
pass
def concatenate(self, others):
"""
Concatenate this TextureVisuals object with others
and return the result without modifying this visual.
Parameters
-----------
others : (n,) Visuals
Other visual objects to concatenate
Returns
-----------
concatenated : TextureVisuals
Concatenated visual objects
"""
util.log.warning('concatenating texture: may result in visual artifacts')
from .objects import concatenate
return concatenate(self, others)
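# Hedged usage sketch (not part of the original module): building a
# TextureVisuals from raw UV coordinates and a PIL image. The UV values, the
# image size and the helper name `_example_texture_visuals` are illustrative
# only; PIL is assumed to be available, as it is for SimpleMaterial users.
def _example_texture_visuals():
    from PIL import Image
    uv = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
    image = Image.new('RGB', (4, 4))
    # passing an image without a material creates a SimpleMaterial for it
    visual = TextureVisuals(uv=uv, image=image)
    assert visual.kind == 'texture'
    # copies carry the UV coordinates and a deep copy of the material
    return visual.copy()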
def unmerge_faces(faces, *args):
"""
    Textured meshes can come with faces referencing vertex
    indices (`v`) and an array of the same shape which references
    vertex texture indices (`vt`) and sometimes even normal indices (`vn`).
    Vertex locations with different values for any of these can't
    be considered the "same" vertex, so for our simple data
    model we must not combine these vertices.
Parameters
-------------
faces : (n, d) int
References vertex indices
*args : (n, d) int
Various references of corresponding values
This is usually UV coordinates or normal indexes
Returns
-------------
new_faces : (m, d) int
New faces for masked vertices
mask_v : (p,) int
A mask to apply to vertices
mask_* : (p,) int
A mask to apply to vt array to get matching UV coordinates
Returns as many of these as args were passed
"""
# stack into pairs of (vertex index, texture index)
stackable = [np.asanyarray(faces).reshape(-1)]
# append multiple args to the correlated stack
# this is usually UV coordinates (vt) and normals (vn)
for arg in args:
stackable.append(np.asanyarray(arg).reshape(-1))
# unify them into rows of a numpy array
stack = np.column_stack(stackable)
# find unique pairs: we're trying to avoid merging
# vertices that have the same position but different
# texture coordinates
unique, inverse = grouping.unique_rows(stack)
# only take the unique pairs
pairs = stack[unique]
# try to maintain original vertex order
order = pairs[:, 0].argsort()
# apply the order to the pairs
pairs = pairs[order]
# we re-ordered the vertices to try to maintain
# the original vertex order as much as possible
# so to reconstruct the faces we need to remap
remap = np.zeros(len(order), dtype=np.int64)
remap[order] = np.arange(len(order))
# the faces are just the inverse with the new order
new_faces = remap[inverse].reshape((-1, 3))
# the mask for vertices and masks for other args
result = [new_faces]
result.extend(pairs.T)
return result
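# Hedged usage sketch (not part of the original module): two triangles sharing
# vertex 0 but referencing different texture indices for it, so vertex 0 must
# be duplicated. The arrays and the helper name are illustrative only.
def _example_unmerge_faces():
    faces = np.array([[0, 1, 2], [0, 2, 3]])       # OBJ-style `v` indices
    faces_tex = np.array([[0, 1, 2], [3, 2, 4]])   # OBJ-style `vt` indices
    new_faces, mask_v, mask_vt = unmerge_faces(faces, faces_tex)
    # vertex 0 appears with two different texture indices, so the unmerged
    # mesh has five vertices instead of four
    assert len(mask_v) == 5 and len(mask_vt) == 5
    assert new_faces.shape == faces.shape
    return new_faces, mask_v, mask_vt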
def power_resize(image, resample=1, square=False):
"""
Resize a PIL image so every dimension is a power of two.
Parameters
------------
image : PIL.Image
Input image
resample : int
Passed to Image.resize
square : bool
If True, upsize to a square image
Returns
-------------
resized : PIL.Image
Input image resized
"""
# what is the current resolution of the image in pixels
size = np.array(image.size, dtype=np.int64)
# what is the resolution of the image upsized to the nearest
# power of two on each axis: allow rectangular textures
new_size = (2 ** np.ceil(np.log2(size))).astype(np.int64)
# make every dimension the largest
if square:
new_size = np.ones(2, dtype=np.int64) * new_size.max()
# if we're not powers of two upsize
if (size != new_size).any():
return image.resize(new_size, resample=resample)
return image.copy()
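# Hedged usage sketch (not part of the original module): upsizing a 100 x 60
# image to the next power of two on each axis. PIL is assumed to be available
# and the dimensions are illustrative only.
def _example_power_resize():
    from PIL import Image
    image = Image.new('RGB', (100, 60))
    resized = power_resize(image)
    # each axis is rounded up independently: 100 -> 128, 60 -> 64
    assert resized.size == (128, 64)
    # with square=True both axes are upsized to the larger power of two
    assert power_resize(image, square=True).size == (128, 128)
    return resized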
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import datetime as dt
from decimal import Decimal
from itertools import product, cycle
from flask import json
from evesrp import db
from evesrp.models import Request, ActionType
from evesrp.auth import PermissionType
from evesrp.auth.models import Pilot, Division, Permission
from evesrp.util.datetime import utc
from evesrp.util.decimal import PrettyDecimal
from ...util_tests import TestLogin
class TestFilterBase(TestLogin):
DIV_1 = 'Division One'
DIV_2 = 'Division Two'
DIV_3 = 'Division Three'
killmails = [
{
'id': 42513498,
'ship_type': 'Scythe',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/42513498/',
'base_payout': 22000000,
'kill_timestamp': dt.datetime(2014, 11, 20, 4, 2,
tzinfo=utc),
'system': 'B-3QPD',
'constellation': 'UX3-N2',
'region': 'Catch',
'pilot': 'Paxswill',
'division': DIV_2,
'details': 'lol Stratop',
'status': ActionType.paid,
},
{
'id': 39697412,
'ship_type': 'Tristan',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/39697412/',
'base_payout': 9100000,
'kill_timestamp': dt.datetime(2014, 6, 23, 20, 6,
tzinfo=utc),
'system': 'Hikkoken',
'constellation': 'Ishaga',
'region': 'Black Rise',
'pilot': 'Paxswill',
'division': DIV_3,
'details': 'Elite Solo PVP',
'status': ActionType.evaluating,
},
{
'id': 39988492,
'ship_type': 'Crow',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/39988492/',
'base_payout': 22000000,
'kill_timestamp': dt.datetime(2014, 7, 9, 18, 22,
tzinfo=utc),
'system': 'Sadana',
'constellation': 'Mareerieh',
'region': 'Aridia',
'pilot': 'Paxswill',
'division': DIV_2,
'details': 'Not so travel interceptor',
'status': ActionType.approved,
},
{
'id': 43292478,
'ship_type': 'Guardian',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/43292478/',
'base_payout': 289700000,
'kill_timestamp': dt.datetime(2014, 12, 22, 4, 6,
tzinfo=utc),
'system': 'RNF-YH',
'constellation': 'JZV-O6',
'region': 'Catch',
'pilot': 'Paxswill',
'division': DIV_2,
'details': 'lol Stratop',
'status': ActionType.incomplete,
},
{
'id': 43500358,
'ship_type': 'Talwar',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/43500358/',
'base_payout': 13700000,
'kill_timestamp': dt.datetime(2014, 12, 31, 1, 48,
tzinfo=utc),
'system': 'Todifrauan',
'constellation': 'Aldodan',
'region': 'Metropolis',
'pilot': 'DurrHurrDurr',
'division': DIV_2,
'details': 'Bar',
'status': ActionType.evaluating,
},
{
'id': 43162254,
'ship_type': 'Cormorant',
'corporation': 'Dreddit',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/43162254/',
'base_payout': 11400000,
'kill_timestamp': dt.datetime(2014, 12, 17, 3, 31,
tzinfo=utc),
'system': 'GE-8JV',
'constellation': '9HXQ-G',
'region': 'Catch',
'pilot': 'DurrHurrDurr',
'division': DIV_2,
'details': 'lol Stratop',
'status': ActionType.approved,
},
{
'id': 31952048,
'ship_type': 'Amarr Shuttle',
'corporation': 'Science and Trade Institute',
'killmail_url': 'https://zkillboard.com/kill/31952048/',
'base_payout': 14000,
'kill_timestamp': dt.datetime(2013, 7, 16, 4, 39,
tzinfo=utc),
'system': 'Karan',
'constellation': 'Selonat',
'region': 'Aridia',
'pilot': 'Gevlon Goblin',
'division': DIV_1,
'details': 'grr goons',
'status': ActionType.approved,
},
{
'id': 41094133,
'ship_type': 'Crucifier',
'corporation': 'Unholy Knights of Cthulu',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/41094133/',
'base_payout': 8300000,
'kill_timestamp': dt.datetime(2014, 9, 6, 1, 32,
tzinfo=utc),
'system': 'Nisuwa',
'constellation': 'Okakuola',
'region': 'Black Rise',
'pilot': 'Sapporo Jones',
'division': DIV_2,
'details': 'Elite Solo PVP',
'status': ActionType.rejected,
},
{
'id': 43341679,
'ship_type': 'Vexor',
'corporation': 'Unholy Knights of Cthulu',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/43341679/',
'base_payout': 39900000,
'kill_timestamp': dt.datetime(2014, 12, 24, 7, 9,
tzinfo=utc),
'system': '4-CM8I',
'constellation': 'DITJ-X',
'region': 'Scalding Pass',
'pilot': 'Sapporo Jones',
'division': DIV_1,
'details': 'Scouting',
'status': ActionType.evaluating,
},
{
'id': 43372860,
'ship_type': 'Imperial Navy Slicer',
'corporation': 'Unholy Knights of Cthulu',
'alliance': 'Test Alliance Please Ignore',
'killmail_url': 'https://zkillboard.com/kill/43372860/',
'base_payout': 15660000,
'kill_timestamp': dt.datetime(2014, 12, 26, 0, 0,
tzinfo=utc),
'system': '8QT-H4',
'constellation': 'MPJW-6',
'region': 'Querious',
'pilot': 'Paxswill',
'division': DIV_1,
'details': 'Elite Solo PVP',
'status': ActionType.incomplete,
},
{
'id': 43975437,
'ship_type': 'Tristan',
'corporation': 'Brave Operations - Lollipop Division',
'alliance': 'Brave Collective',
'killmail_url': 'https://zkillboard.com/kill/43975437/',
'base_payout': 4800000,
'kill_timestamp': dt.datetime(2015, 1, 18, 18, 25,
tzinfo=utc),
'system': 'YHN-3K',
'constellation': 'UX3-N2',
'region': 'Catch',
'pilot': 'Zora Aran',
'division': DIV_3,
'details': 'Awox?',
'status': ActionType.rejected,
},
]
def setUp(self):
super(TestFilterBase, self).setUp()
with self.app.test_request_context():
# Divisions
divisions = {
self.DIV_1: Division(self.DIV_1),
self.DIV_2: Division(self.DIV_2),
self.DIV_3: Division(self.DIV_3),
}
# Give all permissions in all divisions to admin_user
for division in divisions.values():
for permission in PermissionType.all:
Permission(division, permission, self.admin_user)
# Pilots
pilots = {
'Paxswill': 570140137,
'Sapporo Jones': 772506501,
'DurrHurrDurr': 1456384556,
'Gevlon Goblin': 91662677,
'Zora Aran': 534674271,
}
for name, id in pilots.items():
if id % 2 == 0:
user = self.normal_user
else:
user = self.admin_user
db.session.add(Pilot(user, name, id))
# Lossmails/requests
for request_data in self.killmails:
# Copy dict before we pop stuff out of it
data_copy = dict(request_data)
# Distribute requests between users
if request_data['id'] % 2 == 0:
user = self.admin_user
else:
user = self.normal_user
details = data_copy.pop('details')
division = divisions[data_copy.pop('division')]
status = data_copy.pop('status')
data_copy['pilot_id'] = pilots[data_copy.pop('pilot')]
request = Request(user, details, division, data_copy.items())
# Set status after the base payout has been set
request.status = status
db.session.commit()
def check_filter_url(self, url, expected_ids, expected_total):
client = self.login(self.admin_name)
resp = client.get(url, headers={'Accept':'application/json'},
follow_redirects=False)
if resp.status_code == 301:
# Manually follow redirects to keep the Accept header around.
resp = client.get(resp.location,
headers={'Accept':'application/json'},
follow_redirects=False)
resp_obj = json.loads(resp.data)
# Check that the returned requests match
self.assertEqual(expected_ids,
{request['id'] for request in resp_obj['requests']})
# Check that the totals add up properly (in a roundabout way)
self.assertEqual(PrettyDecimal(expected_total).currency(),
resp_obj['total_payouts'])
class TestExactFilter(TestFilterBase):
choices = [None]
def test_exact_filter_combos(self):
        # Explanation for the below: product(seq, repeat=n) computes a
        # cartesian product of the sequence seq against itself n times. By
        # feeding each resulting tuple to frozenset, we can get combinations
        # with repeated choices (ex: ['Foo', 'Foo'] as opposed to
        # ['Bar', 'Foo']). frozenset is used because set() is mutable, and
        # thus unhashable. This is all wrapped in a set comprehension to
        # deduplicate combinations that differ only in ordering (ex:
        # ['Foo', 'Bar'] and ['Bar', 'Foo']). For example, with choices
        # ['Foo', 'Bar'] this yields {'Foo'}, {'Bar'} and {'Foo', 'Bar'}.
choice_combos = {frozenset(combo) for combo in product(self.choices,
repeat=2)}
for combo in choice_combos:
# Find the set of matching killmail IDs first
matching_ids = set()
total_payout = Decimal(0)
for request in self.killmails:
if combo == {None} or request.get(self.attribute) in combo:
matching_ids.add(request['id'])
if request['status'] != ActionType.rejected:
total_payout += Decimal(request['base_payout'])
# Ask the app what it thinks the matching requests are
if combo != {None}:
if self.attribute == 'ship_type':
filter_attribute = 'ship'
else:
filter_attribute = self.attribute
if self.attribute == 'status':
values = ','.join(map(lambda x: x.value, combo))
else:
values = ','.join(combo)
url = '/request/all/{}/{}'.format(filter_attribute, values)
else:
url = '/request/all/'
self.check_filter_url(url, matching_ids, total_payout)
class TestDivisionFilter(TestExactFilter):
attribute = 'division'
choices = [TestFilterBase.DIV_1, TestFilterBase.DIV_2, TestFilterBase.DIV_3]
class TestAllianceFilter(TestExactFilter):
attribute = 'alliance'
choices = [
'Test Alliance Please Ignore',
'Brave Collective',
'Goonswarm Federation',
]
class TestCorporationFilter(TestExactFilter):
attribute = 'corporation'
choices = [
'Dreddit',
'Unholy Knights of Cthulu',
'Goonwaffe',
'Science and Trade Institute',
'Brave Collective - Lollipop Division',
]
class TestPilotFilter(TestExactFilter):
attribute = 'pilot'
choices = [
'Paxswill',
'DurrHurrDurr',
'Gevlon Goblin',
'Sapporo Jones',
'Zora Aran',
]
class TestShipFilter(TestExactFilter):
attribute = 'ship_type'
choices = ['Tristan', 'Crow', 'Vexor', 'Guardian']
class TestRegionFilter(TestExactFilter):
attribute = 'region'
choices = ['Black Rise', 'Catch', 'Aridia', 'Scalding Pass']
class TestConstellationFilter(TestExactFilter):
attribute = 'constellation'
choices = ['UX3-N2', 'Ishaga', 'Mareerieh', '9HXQ-G', 'Selonat']
class TestSystemFilter(TestExactFilter):
attribute = 'system'
choices = ['GE-8JV', 'Todifrauan', 'RNF-YH', '4-CM8I', 'Karan']
class TestStatusFilter(TestExactFilter):
attribute = 'status'
choices = ActionType.statuses
class TestMultipleFilter(TestFilterBase):
choices = {}
def test_exact_multiple_filters(self):
# Compute expected values
matching_ids = set()
total_payout = Decimal(0)
for request in self.killmails:
for attribute, valid_values in self.choices.items():
if request.get(attribute) not in valid_values:
break
else:
matching_ids.add(request['id'])
if request['status'] != ActionType.rejected:
total_payout += request['base_payout']
# Ask the app what it thinks is the answer
url = '/request/all/'
for attribute, values in self.choices.items():
url += '{}/{}/'.format(attribute, ','.join(values))
self.check_filter_url(url, matching_ids, total_payout)
class TestDredditCatchFilter(TestMultipleFilter):
choices = {
'corporation': ['Dreddit'],
'region': ['Catch'],
}
|
|
import time
import urllib
import urllib2
import socket
from platform import python_version_tuple
import anyjson
from . import AuthenticationError, ConnectionError, USER_AGENT
class BaseStream(object):
"""A network connection to Twitters streaming API
:param username: Twitter username for the account accessing the API.
:param password: Twitter password for the account accessing the API.
    :keyword catchup: Number of tweets from the past to fetch before switching
        to the live stream.
:keyword url: Endpoint URL for the object. Note: you should not
need to edit this. It's present to make testing easier.
.. attribute:: connected
True if the object is currently connected to the stream.
.. attribute:: url
The URL to which the object is connected
.. attribute:: starttime
The timestamp, in seconds since the epoch, the object connected to the
streaming api.
.. attribute:: count
The number of tweets that have been returned by the object.
.. attribute:: rate
The rate at which tweets have been returned from the object as a
float. see also :attr: `rate_period`.
.. attribute:: rate_period
        The amount of time to sample tweets to calculate the tweet rate. By
        default 10 seconds. Changes to this attribute will not be reflected
        until the next time the rate is calculated. The rate of tweets varies
        with the time of day etc., so it's useful to set this to something
        sensible.
.. attribute:: user_agent
User agent string that will be included in the request. NOTE: This can
not be changed after the connection has been made. This property must
thus be set before accessing the iterator. The default is set in
:attr: `USER_AGENT`.
"""
def __init__(self, username, password, catchup=None, url=None):
self._conn = None
self._rate_ts = None
self._rate_cnt = 0
self._username = username
self._password = password
self._catchup_count = catchup
self._iter = self.__iter__()
self.rate_period = 10 # in seconds
self.connected = False
self.starttime = None
self.count = 0
self.rate = 0
self.user_agent = USER_AGENT
if url: self.url = url
def __enter__(self):
return self
def __exit__(self, *params):
self.close()
return False
def _init_conn(self):
"""Open the connection to the twitter server"""
headers = {'User-Agent': self.user_agent}
postdata = self._get_post_data() or {}
if self._catchup_count:
postdata["count"] = self._catchup_count
poststring = urllib.urlencode(postdata) if postdata else None
req = urllib2.Request(self.url, poststring, headers)
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self.url, self._username, self._password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
try:
self._conn = opener.open(req)
except urllib2.HTTPError, exception:
if exception.code == 401:
raise AuthenticationError("Access denied")
elif exception.code == 404:
raise ConnectionError("URL not found: %s" % self.url)
else: # re raise. No idea what would cause this, so want to know
raise
except urllib2.URLError, exception:
raise ConnectionError(exception.reason)
        # This is horrible. This line grabs the raw socket (actually an ssl
        # wrapped socket) from the guts of urllib2/httplib. We want the raw
        # socket so we can bypass the buffering that those libs provide.
        # The buffering is reasonable when dealing with connections that
        # try to finish as soon as possible. With twitter's never-ending
        # connections, it causes a bug where we would not deliver tweets
        # until the buffer was full. That's problematic for very low volume
        # filterstreams, since you might not see a tweet for minutes or hours
        # after they occurred while the buffer fills.
        #
        # Oh, and the innards of the http libs are different in py2 and
        # py3, so we need to deal with that. py3 libs do more of what I
        # want by default, but I won't do more special casing for it than
        # necessary.
major, _, _ = python_version_tuple()
# The cast is needed because apparently some versions return strings
# and some return ints.
# On my ubuntu with stock 2.6 I get strings, which match the docs.
# Someone reported the issue on 2.6.1 on macos, but that was
# manually built, not the bundled one. Anyway, cast for safety.
major = int(major)
if major == 2:
self._socket = self._conn.fp._sock.fp._sock
else:
self._socket = self._conn.fp.raw
# our code that reads from the socket expects a method called recv.
# py3 socket.SocketIO uses the name read, so alias it.
self._socket.recv = self._socket.read
self.connected = True
if not self.starttime:
self.starttime = time.time()
if not self._rate_ts:
self._rate_ts = time.time()
def _get_post_data(self):
"""Subclasses that need to add post data to the request can override
this method and return post data. The data should be in the format
returned by urllib.urlencode."""
return None
    def _update_rate(self):
        # Initialise the timestamp on first use so the subtraction below
        # never sees None.
        if not self._rate_ts:
            self._rate_ts = time.time()
            return
        rate_time = time.time() - self._rate_ts
        if rate_time > self.rate_period:
            self.rate = self._rate_cnt / rate_time
            self._rate_cnt = 0
            self._rate_ts = time.time()
def __iter__(self):
buf = b""
while True:
try:
if not self.connected:
self._init_conn()
buf += self._socket.recv(8192)
if buf == b"": # something is wrong
self.close()
raise ConnectionError("Got entry of length 0. Disconnected")
elif buf.isspace():
buf = b""
elif b"\r" not in buf: # not enough data yet. Loop around
continue
lines = buf.split(b"\r")
buf = lines[-1]
lines = lines[:-1]
for line in lines:
line = line.decode("utf8")
try:
tweet = anyjson.deserialize(line)
except ValueError, e:
self.close()
raise ConnectionError("Got invalid data from twitter", details=line)
if 'text' in tweet:
self.count += 1
self._rate_cnt += 1
yield tweet
except socket.error, e:
self.close()
raise ConnectionError("Server disconnected")
def next(self):
"""Return the next available tweet. This call is blocking!"""
return self._iter.next()
def close(self):
"""
Close the connection to the streaming server.
"""
self.connected = False
if self._conn:
self._conn.close()
class SampleStream(BaseStream):
url = "https://stream.twitter.com/1/statuses/sample.json"
class FilterStream(BaseStream):
url = "https://stream.twitter.com/1/statuses/filter.json"
def __init__(self, username, password, follow=None, locations=None,
track=None, catchup=None, url=None):
self._follow = follow
self._locations = locations
self._track = track
        # follow, locations and track are not forwarded to BaseStream; they
        # are stored above and added to the POST data via _get_post_data().
BaseStream.__init__(self, username, password, url=url)
def _get_post_data(self):
postdata = {}
if self._follow: postdata["follow"] = ",".join([str(e) for e in self._follow])
if self._locations: postdata["locations"] = ",".join(self._locations)
if self._track: postdata["track"] = ",".join(self._track)
return postdata
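# Hedged usage sketch (not part of the original module): consuming a
# FilterStream as a context manager. The credentials and track terms are
# placeholders, and network access is obviously required.
def _example_filter_stream():
    with FilterStream("username", "password", track=["python"]) as stream:
        for tweet in stream:
            # each yielded item is a deserialized status dict
            print(tweet["text"])
            break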
|
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_tri_sym_convert():
from brainiak.utils.utils import from_tri_2_sym, from_sym_2_tri
import numpy as np
sym = np.random.rand(3, 3)
tri = from_sym_2_tri(sym)
assert tri.shape[0] == 6,\
"from_sym_2_tri returned wrong result!"
sym1 = from_tri_2_sym(tri, 3)
assert sym1.shape[0] == sym1.shape[1],\
"from_tri_2_sym returned wrong shape!"
tri1 = from_sym_2_tri(sym1)
assert np.array_equiv(tri, tri1),\
"from_sym_2_tri returned wrong result!"
def test_sumexp():
from brainiak.utils.utils import sumexp_stable
import numpy as np
data = np.array([[1, 1], [0, 1]])
sums, maxs, exps = sumexp_stable(data)
assert sums.size == data.shape[1], (
"Invalid sum(exp(v)) computation (wrong # samples in sums)")
assert exps.shape[0] == data.shape[0], (
"Invalid exp(v) computation (wrong # features)")
assert exps.shape[1] == data.shape[1], (
"Invalid exp(v) computation (wrong # samples)")
assert maxs.size == data.shape[1], (
"Invalid max computation (wrong # samples in maxs)")
def test_concatenate_not_none():
from brainiak.utils.utils import concatenate_not_none
import numpy as np
arrays = [None] * 5
arrays[1] = np.array([0, 1, 2])
arrays[3] = np.array([3, 4])
r = concatenate_not_none(arrays, axis=0)
assert np.all(np.arange(5) == r), (
"Invalid concatenation of a list of arrays")
def test_cov2corr():
from brainiak.utils.utils import cov2corr
import numpy as np
cov = np.array([[4, 3, 0], [3, 9, 0], [0, 0, 1]])
corr = cov2corr(cov)
assert np.allclose(corr,
np.array([[1, 0.5, 0], [0.5, 1, 0], [0, 0, 1]])), (
"Converting from covariance matrix to correlation incorrect")
def test_ReadDesign():
from brainiak.utils.utils import ReadDesign
import numpy as np
import os.path
file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
design = ReadDesign(fname=file_path, include_orth=False,
include_pols=False)
assert design, 'Failed to read design matrix'
assert design.reg_nuisance is None, \
        'Nuisance regressor is not None when include_orth and include_pols are'\
' both set to False'
read = ReadDesign()
assert read, 'Failed to initialize an instance of the class'
design = ReadDesign(fname=file_path, include_orth=True, include_pols=True)
assert np.size(design.cols_nuisance) == 10, \
        'Mistake in counting the number of nuisance regressors'
assert np.size(design.cols_task) == 17, \
'Mistake in counting the number of task conditions'
assert (np.shape(design.reg_nuisance)[0]
== np.shape(design.design_task)[0]
            ), 'The number of time points in nuisance regressor does not match'\
' that of task response'
def test_gen_design():
from brainiak.utils.utils import gen_design
import numpy as np
import os.path
files = {'FSL1': 'example_stimtime_1_FSL.txt',
'FSL2': 'example_stimtime_2_FSL.txt',
'AFNI1': 'example_stimtime_1_AFNI.txt',
'AFNI2': 'example_stimtime_2_AFNI.txt'}
for key in files.keys():
files[key] = os.path.join(os.path.dirname(__file__), files[key])
design1 = gen_design(stimtime_files=files['FSL1'], scan_duration=[48, 20],
TR=2, style='FSL')
assert design1.shape == (34, 1), 'Returned design matrix has wrong shape'
assert design1[24] == 0, (
"gen_design should generated design matrix for each run separately "
"and concatenate them.")
design2 = gen_design(stimtime_files=[files['FSL1'], files['FSL2']],
scan_duration=[48, 20], TR=2, style='FSL')
assert design2.shape == (34, 2), 'Returned design matrix has wrong shape'
design3 = gen_design(stimtime_files=files['FSL1'], scan_duration=68, TR=2,
style='FSL')
assert design3[24] != 0, (
'design matrix should be non-zero 8 seconds after an event onset.')
design4 = gen_design(stimtime_files=[files['FSL2']],
scan_duration=[48, 20], TR=2, style='FSL')
assert np.all(np.isclose(design1 * 0.5, design4)), (
'gen_design does not treat missing values correctly')
design5 = gen_design(stimtime_files=[files['FSL2']],
scan_duration=[48, 20], TR=1)
assert (np.abs(design4 - design5[::2])).mean() < 0.1, (
'design matrices sampled at different frequency do not match'
' at corresponding time points')
design6 = gen_design(stimtime_files=[files['AFNI1']],
scan_duration=[48, 20], TR=2, style='AFNI')
assert np.all(np.isclose(design1, design6)), (
'design matrices generated from AFNI style and FSL style do not match')
design7 = gen_design(stimtime_files=[files['AFNI2']],
scan_duration=[48], TR=2, style='AFNI')
assert np.all(design7 == 0.0), (
'A negative stimulus onset of AFNI style should result in an all-zero'
+ ' design matrix')
def test_center_mass_exp():
from brainiak.utils.utils import center_mass_exp
import numpy as np
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp([1, 2])
assert ('interval must be a tuple'
in str(excinfo.value))
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp((1, 2, 3))
assert ('interval must be length two'
in str(excinfo.value))
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp((-2, -1))
assert ('interval_left must be non-negative'
in str(excinfo.value))
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp((-2, 3))
assert ('interval_left must be non-negative'
in str(excinfo.value))
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp((3, 3))
assert ('interval_right must be bigger than interval_left'
in str(excinfo.value))
with pytest.raises(AssertionError) as excinfo:
result = center_mass_exp((1, 2), -1)
assert ('scale must be positive'
in str(excinfo.value))
result = center_mass_exp((0, np.inf), 2.0)
assert np.isclose(result, 2.0), 'center of mass '\
'incorrect for the whole distribution'
result = center_mass_exp((1.0, 1.0+2e-10))
assert np.isclose(result, 1.0+1e-10), 'for a small '\
'enough interval, the center of mass should be '\
'close to its mid-point'
def test_p_from_null():
import numpy as np
from brainiak.utils.utils import p_from_null
# Create random null and observed value in tail
null = np.random.randn(10000)
observed = np.ceil(np.percentile(null, 97.5) * 1000) / 1000
# Check that we catch improper side
with pytest.raises(ValueError):
_ = p_from_null(observed, null, side='wrong')
# Check two-tailed p-value for observed
p_ts = p_from_null(observed, null)
assert np.isclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for observed
p_right = p_from_null(observed, null, side='right')
assert np.isclose(p_right, 0.025, atol=1e-02)
assert np.isclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for observed
p_left = p_from_null(observed, null, side='left')
assert np.isclose(p_left, 0.975, atol=1e-02)
assert np.isclose(1 - p_left, p_right, atol=1e-02)
assert np.isclose(1 - p_left, p_ts / 2, atol=1e-02)
# Check 2-dimensional input (i.e., samples by voxels)
null = np.random.randn(10000, 3)
observed = np.ceil(np.percentile(null, 97.5, axis=0) * 1000) / 1000
# Check two-tailed p-value for observed
p_ts = p_from_null(observed, null, axis=0)
assert np.allclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for observed
p_right = p_from_null(observed, null, side='right', axis=0)
assert np.allclose(p_right, 0.025, atol=1e-02)
assert np.allclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for observed
p_left = p_from_null(observed, null, side='left', axis=0)
assert np.allclose(p_left, 0.975, atol=1e-02)
assert np.allclose(1 - p_left, p_right, atol=1e-02)
assert np.allclose(1 - p_left, p_ts / 2, atol=1e-02)
# Check for exact test
p_ts = p_from_null(observed, null, exact=True, axis=0)
assert np.allclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for exact test
p_right = p_from_null(observed, null, side='right',
exact=True, axis=0)
assert np.allclose(p_right, 0.025, atol=1e-02)
assert np.allclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for exact test
p_left = p_from_null(observed, null, side='left',
exact=True, axis=0)
assert np.allclose(p_left, 0.975, atol=1e-02)
assert np.allclose(1 - p_left, p_right, atol=1e-02)
assert np.allclose(1 - p_left, p_ts / 2, atol=1e-02)
def test_phase_randomize():
import numpy as np
from scipy.fftpack import fft
from scipy.stats import pearsonr
from brainiak.utils.utils import phase_randomize
data = np.repeat(np.repeat(np.random.randn(60)[:, np.newaxis, np.newaxis],
30, axis=1),
20, axis=2)
assert np.array_equal(data[..., 0], data[..., 1])
# Phase-randomize data across subjects (same across voxels)
shifted_data = phase_randomize(data, voxelwise=False, random_state=1)
assert shifted_data.shape == data.shape
assert not np.array_equal(shifted_data[..., 0], shifted_data[..., 1])
assert not np.array_equal(shifted_data[..., 0], data[..., 0])
# Check that uneven n_TRs doesn't explode
_ = phase_randomize(data[:-1, ...])
# Check that random_state returns same shifts
shifted_data_ = phase_randomize(data, voxelwise=False, random_state=1)
assert np.array_equal(shifted_data, shifted_data_)
shifted_data_ = phase_randomize(data, voxelwise=False, random_state=2)
assert not np.array_equal(shifted_data, shifted_data_)
# Phase-randomize subjects and voxels
shifted_data = phase_randomize(data, voxelwise=True, random_state=1)
assert shifted_data.shape == data.shape
assert not np.array_equal(shifted_data[..., 0], shifted_data[..., 1])
assert not np.array_equal(shifted_data[..., 0], data[..., 0])
assert not np.array_equal(shifted_data[:, 0, 0], shifted_data[:, 1, 0])
# Try with 2-dimensional input
shifted_data = phase_randomize(data[..., 0],
voxelwise=True,
random_state=1)
assert shifted_data.ndim == 2
assert not np.array_equal(shifted_data[:, 0], shifted_data[:, 1])
# Create correlated noisy data
corr_data = np.repeat(np.random.randn(60)[:, np.newaxis, np.newaxis],
2, axis=2) + np.random.randn(60, 1, 2)
# Get correlation and frequency domain for data
corr_r = pearsonr(corr_data[:, 0, 0],
corr_data[:, 0, 1])[0]
corr_freq = fft(corr_data, axis=0)
# Phase-randomize time series and get correlation/frequency
shifted_data = phase_randomize(corr_data)
shifted_r = pearsonr(shifted_data[:, 0, 0],
shifted_data[:, 0, 1])[0]
shifted_freq = fft(shifted_data, axis=0)
# Check that phase-randomization reduces correlation
assert np.abs(shifted_r) < np.abs(corr_r)
# Check that amplitude spectrum is preserved
assert np.allclose(np.abs(shifted_freq), np.abs(corr_freq))
def test_check_timeseries_input():
import numpy as np
from itertools import combinations
from brainiak.utils.utils import _check_timeseries_input
# Set a fixed vector for comparison
vector = np.random.randn(60)
# List of subjects with one voxel/ROI
list_1d = [vector for _ in np.arange(10)]
(data_list_1d, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(list_1d)
assert n_TRs == 60
assert n_voxels == 1
assert n_subjects == 10
# Array of subjects with one voxel/ROI
array_2d = np.hstack([vector[:, np.newaxis]
for _ in np.arange(10)])
(data_array_2d, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(array_2d)
assert n_TRs == 60
assert n_voxels == 1
assert n_subjects == 10
# List of 2-dimensional arrays
list_2d = [vector[:, np.newaxis] for _ in np.arange(10)]
(data_list_2d, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(list_2d)
assert n_TRs == 60
assert n_voxels == 1
assert n_subjects == 10
# Check if lists have mismatching size
list_bad = [list_2d[0][:-1, :]] + list_2d[1:]
with pytest.raises(ValueError):
(data_list_bad, _, _, _) = _check_timeseries_input(list_bad)
# List of 3-dimensional arrays
list_3d = [vector[:, np.newaxis, np.newaxis]
for _ in np.arange(10)]
(data_list_3d, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(list_3d)
assert n_TRs == 60
assert n_voxels == 1
assert n_subjects == 10
# 3-dimensional array
array_3d = np.dstack([vector[:, np.newaxis]
for _ in np.arange(10)])
(data_array_3d, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(array_3d)
assert n_TRs == 60
assert n_voxels == 1
assert n_subjects == 10
# Check that 4-dimensional input array throws error
array_4d = array_3d[..., np.newaxis]
with pytest.raises(ValueError):
(data_array_4d, _, _, _) = _check_timeseries_input(array_4d)
# Check they're the same
for pair in combinations([data_list_1d, data_array_2d,
data_list_2d, data_list_3d,
data_array_3d], 2):
assert np.array_equal(pair[0], pair[1])
# List of multivoxel arrays
matrix = np.random.randn(60, 30)
list_mv = [matrix
for _ in np.arange(10)]
(data_list_mv, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(list_mv)
assert n_TRs == 60
assert n_voxels == 30
assert n_subjects == 10
# 3-dimensional array with multiple voxels
array_mv = np.dstack([matrix for _ in np.arange(10)])
(data_array_mv, n_TRs,
n_voxels, n_subjects) = _check_timeseries_input(array_mv)
assert n_TRs == 60
assert n_voxels == 30
assert n_subjects == 10
assert np.array_equal(data_list_mv, data_array_mv)
def test_array_correlation():
import numpy as np
from brainiak.utils.utils import array_correlation
from scipy.stats import pearsonr
# Minimal array datasets
n_TRs = 30
n_voxels = 2
x, y = (np.random.randn(n_TRs, n_voxels),
np.random.randn(n_TRs, n_voxels))
# Perform the correlation
r = array_correlation(x, y)
# Check there are the right number of voxels in the output
assert r.shape == (n_voxels,)
# Check that this (roughly) matches corrcoef
assert np.allclose(r, np.corrcoef(x.T, y.T)[[0, 1], [2, 3]])
# Check that this (roughly) matches pearsonr
assert np.allclose(r, np.array([pearsonr(x[:, 0], y[:, 0])[0],
pearsonr(x[:, 1], y[:, 1])[0]]))
# Try axis argument
assert np.allclose(array_correlation(x, y, axis=0),
array_correlation(x.T, y.T, axis=1))
# Trigger shape mismatch error
with pytest.raises(ValueError):
array_correlation(x, y[:, 0])
with pytest.raises(ValueError):
array_correlation(x, y[:-1])
# Feed in lists
_ = array_correlation(x.tolist(), y)
_ = array_correlation(x, y.tolist())
_ = array_correlation(x.tolist(), y.tolist())
# Check 1D array input
x, y = (np.random.randn(n_TRs),
np.random.randn(n_TRs))
assert type(array_correlation(x, y)) == np.float64
assert np.isclose(array_correlation(x, y),
pearsonr(x, y)[0])
# 1D list inputs
_ = array_correlation(x.tolist(), y)
_ = array_correlation(x, y.tolist())
_ = array_correlation(x.tolist(), y.tolist())
# Check integer inputs
x, y = (np.random.randint(0, 9, (n_TRs, n_voxels)),
np.random.randint(0, 9, (n_TRs, n_voxels)))
_ = array_correlation(x, y)
|
|
# -*- coding: utf-8 -*-
"""
python -c "import utool as ut; ut.write_modscript_alias('Tgen.sh', 'ibeis.templates.template_generator')"
sh Tgen.sh --key review --invert --Tcfg with_getters=True with_setters=False --modfname manual_review_funcs
# TODO: Fix this name it is too special case
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six # NOQA
from six.moves import zip, map, reduce
#import numpy as np
#import vtool_ibeis as vt
import numpy as np
import ubelt as ub # NOQA
from ibeis import constants as const
from ibeis.control import accessor_decors, controller_inject # NOQA
import utool as ut
import uuid
from ibeis.control.controller_inject import make_ibs_register_decorator
print, rrr, profile = ut.inject2(__name__)
VERBOSE_SQL = ut.get_argflag(('--print-sql', '--verbose-sql', '--verb-sql', '--verbsql'))
CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
register_api = controller_inject.get_ibeis_flask_api(__name__)
REVIEW_ROWID = 'review_rowid'
REVIEW_UUID = 'review_uuid'
REVIEW_AID1 = 'annot_1_rowid'
REVIEW_AID2 = 'annot_2_rowid'
REVIEW_COUNT = 'review_count'
REVIEW_EVIDENCE_DECISION = 'review_evidence_decision'
REVIEW_META_DECISION = 'review_meta_decision'
REVIEW_USER_IDENTITY = 'review_user_identity'
REVIEW_USER_CONFIDENCE = 'review_user_confidence'
REVIEW_TAGS = 'review_tags'
REVIEW_TIME_CLIENT_START = 'review_client_start_time_posix'
REVIEW_TIME_CLIENT_END = 'review_client_end_time_posix'
REVIEW_TIME_SERVER_START = 'review_server_start_time_posix'
REVIEW_TIME_SERVER_END = 'review_server_end_time_posix'
def e_(u, v):
return (u, v) if u < v else (v, u)
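# For example, e_(4, 2) -> (2, 4) and e_(2, 4) -> (2, 4): it normalizes an
# undirected annotation pair so the smaller aid always comes first.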
def hack_create_aidpair_index(ibs):
# HACK IN INDEX
sqlfmt = ut.codeblock(
'''
CREATE INDEX IF NOT EXISTS {index_name} ON {table} ({index_cols});
''')
sqlcmd = sqlfmt.format(
index_name='aidpair_to_rowid',
table=ibs.const.REVIEW_TABLE,
index_cols=','.join([REVIEW_AID1, REVIEW_AID2])
)
ibs.staging.connection.execute(sqlcmd).fetchall()
sqlcmd = sqlfmt.format(
index_name='aid1_to_rowids',
table=ibs.const.REVIEW_TABLE,
index_cols=','.join([REVIEW_AID1])
)
ibs.staging.connection.execute(sqlcmd).fetchall()
sqlcmd = sqlfmt.format(
index_name='aid2_to_rowids',
table=ibs.const.REVIEW_TABLE,
index_cols=','.join([REVIEW_AID2])
)
ibs.staging.connection.execute(sqlcmd).fetchall()
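# For illustration, assuming ibs.const.REVIEW_TABLE is the 'reviews' table
# (as the doctests below suggest), the first statement above expands to roughly:
#   CREATE INDEX IF NOT EXISTS aidpair_to_rowid
#   ON reviews (annot_1_rowid,annot_2_rowid);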
@register_ibs_method
@accessor_decors.ider
@register_api('/api/review/', methods=['GET'])
def _get_all_review_rowids(ibs):
r"""
Returns:
list_ (list): all review rowids in the staging database
"""
all_known_review_rowids = ibs.staging.get_all_rowids(const.REVIEW_TABLE)
return all_known_review_rowids
@register_ibs_method
def _set_review_uuids(ibs, review_rowid_list, review_uuid_list):
r"""
Sets the review UUID for each review rowid in review_rowid_list.
"""
id_iter = ((review_rowid,) for review_rowid in review_rowid_list)
val_iter = ((review_uuid,) for review_uuid in review_uuid_list)
ibs.staging.set(const.REVIEW_TABLE, (REVIEW_UUID,), val_iter, id_iter)
@register_ibs_method
def get_review_rowid_from_superkey(ibs, aid_1_list, aid_2_list, count_list,
eager=False, nInput=None):
""" Returns review_rowid_list
Args:
superkey lists: aid_1_list, aid_2_list, count_list
Returns:
review_rowid_list
"""
colnames = (REVIEW_ROWID,)
params_iter = zip(aid_1_list, aid_2_list, count_list)
where_colnames = [REVIEW_AID1, REVIEW_AID2, REVIEW_COUNT]
review_rowid_list = list(ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput))
return review_rowid_list
@register_ibs_method
@accessor_decors.adder
@register_api('/api/review/', methods=['POST'])
def add_review(ibs, aid_1_list, aid_2_list, evidence_decision_list,
meta_decision_list=None, review_uuid_list=None,
identity_list=None, user_confidence_list=None, tags_list=None,
review_client_start_time_posix=None, review_client_end_time_posix=None,
review_server_start_time_posix=None, review_server_end_time_posix=None):
r"""
Adds a list of reviews.
Returns:
list: review_id_list - review rowids
RESTful:
Method: POST
URL: /api/review/
CommandLine:
python -m ibeis.control.manual_review_funcs --test-add_review
Doctest:
>>> import ibeis
>>> from ibeis.control.manual_review_funcs import *
>>> ibs = ibeis.opendb('testdb1')
>>> ibs.staging.get_table_as_pandas('reviews')
>>> # ensure it is empty
>>> rowids = ibs.staging.get_all_rowids('reviews')
>>> ibs.staging.delete_rowids('reviews', rowids)
>>> ut.exec_funckw(ibs.add_review, globals())
>>> # Add some dummy reviews
>>> aid_1_list = [1, 2, 3, 2]
>>> aid_2_list = [2, 3, 4, 3]
>>> evidence_decision_list = [1, 0, 1, 2]
>>> new_rowids = ibs.add_review(aid_1_list, aid_2_list,
>>> evidence_decision_list)
>>> assert new_rowids == [1, 2, 3, 4]
>>> table = ibs.staging.get_table_as_pandas('reviews')
>>> print(table)
>>> # Then delete them
>>> ibs.staging.delete_rowids('reviews', new_rowids)
"""
assert len(aid_1_list) == len(aid_2_list)
assert len(aid_1_list) == len(evidence_decision_list)
diff_list = np.array(aid_1_list) - np.array(aid_2_list)
assert np.all(diff_list != 0), (
'Cannot add a review state between an aid and itself')
n_input = len(aid_1_list)
# Order aid_1_list and aid_2_list pairs so that aid_1_list is always lower
aid_pair_list = [e_(u, v) for u, v in zip(aid_1_list, aid_2_list)]
aid_1_list = [pair[0] for pair in aid_pair_list]
aid_2_list = [pair[1] for pair in aid_pair_list]
if True:
# Get current review counts from database
unique_pairs = list(set(aid_pair_list))
count_base = [
0 if counts is None or len(counts) == 0 else
max(max(counts), len(counts))
for counts in ibs.get_review_counts_from_pairs(unique_pairs)
]
pair_to_count = {
edge: count
for edge, count in zip(unique_pairs, count_base)
}
count_list = []
for edge in aid_pair_list:
pair_to_count[edge] += 1
count = pair_to_count[edge]
count_list.append(count)
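# Worked example (assuming no pre-existing reviews for these pairs):
# aid_pair_list = [(1, 2), (1, 2), (2, 3)] gives count_base = [0, 0] for the
# unique pairs, so count_list becomes [1, 2, 1] -- the count records how many
# times each undirected pair has been reviewed so far.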
# else:
# # Alternative implementation
# unique_pairs, groupxs = ut.group_indices(aid_pair_list)
# unique_base = [
# 0 if counts is None or len(counts) == 0 else
# max(max(counts), len(counts))
# for counts in ibs.get_review_counts_from_pairs(unique_pairs)
# ]
# grouped_base = [[b] * len(g) for b, g in zip(unique_base, groupxs)]
# grouped_offsets = [list(range(n)) for n in map(len, groupxs)]
# base = np.array(ut.ungroup(grouped_base, groupxs))
# offsets = np.array(ut.ungroup(grouped_offsets, groupxs))
# count_list = offsets + base + 1
if review_uuid_list is None:
review_uuid_list = [uuid.uuid4() for _ in range(n_input)]
if meta_decision_list is None:
meta_decision_list = [None for _ in range(n_input)]
if identity_list is None:
# identity_list = [ut.get_computer_name()] * len(aid_1_list)
identity_list = [None] * n_input
if tags_list is None:
tag_str_list = [None] * n_input
else:
tag_str_list = [';'.join(map(str, tag_list)) for tag_list in tags_list]
if user_confidence_list is None:
user_confidence_list = [None] * n_input
if review_client_start_time_posix is None:
review_client_start_time_posix = [None] * n_input
if review_client_end_time_posix is None:
review_client_end_time_posix = [None] * n_input
if review_server_start_time_posix is None:
review_server_start_time_posix = [None] * n_input
if review_server_end_time_posix is None:
review_server_end_time_posix = [None] * n_input
assert n_input == len(identity_list)
assert n_input == len(tag_str_list)
assert n_input == len(user_confidence_list)
assert n_input == len(review_uuid_list)
assert n_input == len(count_list)
superkey_paramx = (0, 1, 2, )
# TODO Allow for better ensure=False without using partial
# Just autogenerate these functions
colnames = [REVIEW_AID1, REVIEW_AID2, REVIEW_COUNT,
REVIEW_UUID, REVIEW_EVIDENCE_DECISION, REVIEW_META_DECISION,
REVIEW_USER_IDENTITY, REVIEW_USER_CONFIDENCE, REVIEW_TAGS,
REVIEW_TIME_CLIENT_START, REVIEW_TIME_CLIENT_END,
REVIEW_TIME_SERVER_START, REVIEW_TIME_SERVER_END]
params_iter = list(zip(aid_1_list, aid_2_list, count_list,
review_uuid_list, evidence_decision_list,
meta_decision_list, identity_list,
user_confidence_list, tag_str_list,
review_client_start_time_posix,
review_client_end_time_posix,
review_server_start_time_posix,
review_server_end_time_posix))
review_rowid_list = ibs.staging.add_cleanly(const.REVIEW_TABLE, colnames,
params_iter,
ibs.get_review_rowid_from_superkey,
superkey_paramx)
return review_rowid_list
@register_ibs_method
@accessor_decors.deleter
#@cache_invalidator(const.REVIEW_TABLE)
@register_api('/api/review/', methods=['DELETE'])
def delete_review(ibs, review_rowid_list):
r"""
deletes reviews from the database
RESTful:
Method: DELETE
URL: /api/review/
"""
if ut.VERBOSE:
print('[ibs] deleting %d reviews' % len(review_rowid_list))
ibs.staging.delete_rowids(const.REVIEW_TABLE, review_rowid_list)
@register_ibs_method
def get_review_rowids_from_edges(ibs, edges, eager=True, nInput=None,
directed=False):
colnames = (REVIEW_ROWID,)
# Order aid_1_list and aid_2_list pairs so that aid_1_list is always lower
# params_iter = edges
if directed:
params_iter = edges
else:
params_iter = [e_(u, v) for u, v in edges]
where_colnames = [REVIEW_AID1, REVIEW_AID2]
review_rowids_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput, unpack_scalars=False)
return review_rowids_list
@register_ibs_method
@accessor_decors.getter_1to1
def get_review_exists_from_edges(ibs, edges, eager=True, nInput=None):
# Order aid_1_list and aid_2_list pairs so that aid_1_list is always lower
# params_iter = (e_(u, v) for u, v in edges)
params_iter = edges
where_colnames = [REVIEW_AID1, REVIEW_AID2]
exists_list = ibs.staging.exists_where_eq(
const.REVIEW_TABLE, params_iter, where_colnames,
eager=False, nInput=nInput, unpack_scalars=True)
exists_list = map(bool, exists_list)
if eager:
exists_list = list(exists_list)
return exists_list
@register_ibs_method
@accessor_decors.getter_1toM
@register_api('/api/review/rowids/tuple/', methods=['GET'], __api_plural_check__=False)
def get_review_rowids_from_aid_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
r"""
Aid pairs are undirected
Returns:
list_ (list): review_rowid_list - review rowid list of lists
RESTful:
Method: GET
URL: /api/review/rowid/tuple/
"""
# Order aid_1_list and aid_2_list pairs so that aid_1_list is always lower
edges = (e_(u, v) for u, v in zip(aid_1_list, aid_2_list))
return get_review_rowids_from_edges(ibs, edges, eager=eager, nInput=nInput)
# colnames = (REVIEW_ROWID,)
# where_colnames = [REVIEW_AID1, REVIEW_AID2]
# review_rowids_list = ibs.staging.get_where_eq(
# const.REVIEW_TABLE, colnames, params_iter, where_colnames,
# eager=eager, nInput=nInput, unpack_scalars=False)
# return review_rowids_list
@register_ibs_method
def get_review_rowids_between(ibs, aids1, aids2=None, method=None):
"""
Find staging rowids between sets of aids
Doctest:
>>> from ibeis.control.manual_review_funcs import *
>>> import ibeis
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> aids1 = aids2 = [1, 2, 3, 4, 5, 6]
>>> rowids_between = ibs.get_review_rowids_between
>>> ids1 = sorted(rowids_between(aids1, aids2, method=1))
>>> ids2 = sorted(rowids_between(aids1, aids2, method=2))
>>> assert len(ub.find_duplicates(ids1)) == 0
>>> assert len(ub.find_duplicates(ids2)) == 0
>>> assert ids1 == ids2
"""
if aids2 is None:
aids2 = aids1
if method is None:
if len(aids1) * len(aids2) > 5000:
method = 1
else:
method = 2
if method == 1:
# Strategy 1: get all existing rows and see what intersects
# This is better when the enumerated set of rows would be larger than
# the database size
rowids11 = set(ut.flatten(ibs.get_review_rowids_from_aid1(aids1)))
rowids12 = set(ut.flatten(ibs.get_review_rowids_from_aid2(aids1)))
if aids1 is aids2:
rowids = list(reduce(set.intersection, [rowids11, rowids12]))
else:
rowids21 = set(ut.flatten(ibs.get_review_rowids_from_aid1(aids2)))
rowids22 = set(ut.flatten(ibs.get_review_rowids_from_aid2(aids2)))
rowids = list(reduce(set.intersection, [rowids11, rowids12,
rowids21, rowids22]))
elif method == 2:
# Strategy 2: enumerate what rows could exist and see what does exist
# This is better when the enumerated set of rows would be smaller than
# the database size
edges = list(ut.product_nonsame(aids1, aids2))
if len(edges) == 0:
rowids = []
else:
rowids = ibs.get_review_rowids_from_edges(edges, directed=True)
if rowids is None:
rowids = []
rowids = ut.filter_Nones(rowids)
rowids = ut.flatten(rowids)
else:
raise ValueError('no method=%r' % (method,))
return rowids
# @register_ibs_method
# @accessor_decors.getter_1to1
# @register_api('/api/review/rowids/undirected/', methods=['GET'], __api_plural_check__=False)
# def get_review_rowids_from_undirected_tuple(ibs, aid_1_list, aid_2_list):
# aids1, aids2 = aid_1_list, aid_2_list
# review_rowids_dir1 = ibs.get_review_rowids_from_aid_tuple(aids1, aids2)
# review_rowids_dir2 = ibs.get_review_rowids_from_aid_tuple(aids2, aids1)
# def _join_rowids(rowids1, rowids2):
# if rowids1 is None and rowids2 is None:
# return None
# else:
# if rowids1 is None:
# rowids1 = []
# elif rowids2 is None:
# rowids2 = []
# return rowids1 + rowids2
# review_rowids_list = [
# _join_rowids(rowids1, rowids2)
# for rowids1, rowids2 in zip(review_rowids_dir1, review_rowids_dir2)
# ]
# return review_rowids_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/count/', methods=['GET'])
def get_review_count(ibs, review_rowid_list):
review_count_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_COUNT,), review_rowid_list)
return review_count_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/counts/tuple/', methods=['GET'], __api_plural_check__=False)
def get_review_counts_from_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_counts_list - review counts
RESTful:
Method: GET
URL: /api/review/counts/tuple/
"""
aid_pairs = zip(aid_1_list, aid_2_list)
review_counts_list = ibs.get_review_counts_from_pairs(aid_pairs)
return review_counts_list
@register_ibs_method
def get_review_counts_from_pairs(ibs, aid_pairs, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_counts_list - review counts
RESTful:
Method: GET
URL: /api/review/counts/tuple/
"""
colnames = (REVIEW_COUNT,)
params_iter = aid_pairs
where_colnames = [REVIEW_AID1, REVIEW_AID2]
review_counts_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput, unpack_scalars=False)
return review_counts_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/decision/', methods=['GET'])
def get_review_decision(ibs, review_rowid_list):
review_decision_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_EVIDENCE_DECISION,), review_rowid_list)
return review_decision_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/uuid/', methods=['GET'])
def get_review_uuid(ibs, review_rowid_list):
review_uuid_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_UUID,), review_rowid_list)
return review_uuid_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/decision/str/', methods=['GET'])
def get_review_decision_str(ibs, review_rowid_list):
review_decision_list = ibs.get_review_decision(review_rowid_list)
review_decision_str_list = [
const.EVIDENCE_DECISION.INT_TO_NICE.get(review_decision)
for review_decision in review_decision_list
]
return review_decision_str_list
@register_ibs_method
@register_api('/api/review/decisions/only/', methods=['GET'], __api_plural_check__=False)
def get_review_decisions_from_only(ibs, aid_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_tuple_decisions_list - review decisions
RESTful:
Method: GET
URL: /api/review/decisions/only/
"""
colnames = (REVIEW_AID1, REVIEW_AID2, REVIEW_EVIDENCE_DECISION,)
params_iter = [ (aid, ) for aid in aid_list ]
where_clause = '%s=?' % (REVIEW_AID1)
review_tuple_decisions_list = ibs.staging.get_where(const.REVIEW_TABLE, colnames,
params_iter, where_clause,
unpack_scalars=False)
return review_tuple_decisions_list
@register_ibs_method
@register_api('/api/review/rowids/only/', methods=['GET'], __api_plural_check__=False)
def get_review_rowids_from_only(ibs, aid_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_rowids
RESTful:
Method: GET
URL: /api/review/rowids/only/
"""
colnames = (REVIEW_ROWID,)
params_iter = [ (aid, ) for aid in aid_list ]
where_clause = '%s=?' % (REVIEW_AID1)
review_rowids = ibs.staging.get_where(const.REVIEW_TABLE, colnames,
params_iter, where_clause,
unpack_scalars=False)
return review_rowids
@register_ibs_method
def get_review_rowids_from_single(ibs, aid_list, eager=True, nInput=None):
colnames = (REVIEW_ROWID,)
params_iter = [(aid, aid) for aid in aid_list]
where_clause = '%s=? OR %s=?' % (REVIEW_AID1, REVIEW_AID2)
review_rowids = ibs.staging.get_where(
const.REVIEW_TABLE, colnames, params_iter, where_clause=where_clause,
unpack_scalars=False)
return review_rowids
@register_ibs_method
def get_review_rowids_from_aid1(ibs, aid_list, eager=True, nInput=None):
colnames = (REVIEW_ROWID,)
params_iter = [(aid,) for aid in aid_list]
where_clause = '%s=?' % (REVIEW_AID1,)
review_rowids = ibs.staging.get_where(
const.REVIEW_TABLE, colnames, params_iter, where_clause=where_clause,
unpack_scalars=False)
return review_rowids
@register_ibs_method
def get_review_rowids_from_aid2(ibs, aid_list, eager=True, nInput=None):
colnames = (REVIEW_ROWID,)
params_iter = [(aid,) for aid in aid_list]
where_clause = '%s=?' % (REVIEW_AID2,)
review_rowids = ibs.staging.get_where(
const.REVIEW_TABLE, colnames, params_iter, where_clause=where_clause,
unpack_scalars=False)
return review_rowids
# @register_ibs_method
# @accessor_decors.getter_1to1
# @register_api('/api/review/decisions/single/', methods=['GET'], __api_plural_check__=False)
# def get_review_decisions_from_single(ibs, aid_list, eager=True, nInput=None):
# r"""
# Returns:
# list_ (list): review_tuple_decisions_list - review decisions
# RESTful:
# Method: GET
# URL: /api/review/identities/single/
# """
# colnames = (REVIEW_AID1, REVIEW_AID2, REVIEW_EVIDENCE_DECISION,)
# params_iter = zip(aid_list, aid_list, )
# where_colnames = [REVIEW_AID1, REVIEW_AID2]
# review_tuple_decisions_list = ibs.staging.get_where_eq(
# const.REVIEW_TABLE, colnames, params_iter, where_colnames,
# eager=eager, nInput=nInput, op='OR', unpack_scalars=False)
# return review_tuple_decisions_list
# @register_ibs_method
# @accessor_decors.getter_1to1
# @register_api('/api/review/decisions/tuple/', methods=['GET'], __api_plural_check__=False)
# def get_review_decisions_from_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
# r"""
# Returns:
# list_ (list): review_decisions_list - review decisions
# RESTful:
# Method: GET
# URL: /api/review/identities/tuple/
# """
# colnames = (REVIEW_EVIDENCE_DECISION,)
# params_iter = zip(aid_1_list, aid_2_list)
# where_colnames = [REVIEW_AID1, REVIEW_AID2]
# review_decisions_list = ibs.staging.get_where_eq(
# const.REVIEW_TABLE, colnames, params_iter, where_colnames,
# eager=eager, nInput=nInput, unpack_scalars=False)
# return review_decisions_list
# @register_ibs_method
# @accessor_decors.getter_1to1
# @register_api('/api/review/decisions/str/tuple/', methods=['GET'], __api_plural_check__=False)
# def get_review_decisions_str_from_tuple(ibs, aid_1_list, aid_2_list, **kwargs):
# r"""
# Returns:
# list_ (list): review_decisions_list - review decisions
# RESTful:
# Method: GET
# URL: /api/review/identities/str/tuple/
# """
# review_decisions_list = ibs.get_review_decisions_from_tuple(
# aid_1_list, aid_2_list, **kwargs)
# review_decision_str_list = [
# [
# const.EVIDENCE_DECISION.INT_TO_NICE.get(review_decision)
# for review_decision in review_decision_list
# ]
# for review_decision_list in review_decisions_list
# ]
# return review_decision_str_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/identity/', methods=['GET'])
def get_review_identity(ibs, review_rowid_list):
review_identity_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_USER_IDENTITY,), review_rowid_list)
return review_identity_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/confidence/', methods=['GET'])
def get_review_user_confidence(ibs, review_rowid_list):
user_confidence_list = ibs.staging.get(const.REVIEW_TABLE,
(REVIEW_USER_CONFIDENCE,),
review_rowid_list)
return user_confidence_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/identities/tuple/', methods=['GET'], __api_plural_check__=False)
def get_review_identities_from_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_identities_list - review identities
RESTful:
Method: GET
URL: /api/review/identities/tuple/
"""
colnames = (REVIEW_USER_IDENTITY,)
params_iter = zip(aid_1_list, aid_2_list)
where_colnames = [REVIEW_AID1, REVIEW_AID2]
review_identities_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput, unpack_scalars=False)
return review_identities_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/posix/', methods=['GET'])
def get_review_posix_time(ibs, review_rowid_list):
return ibs.get_review_posix_server_end_time(review_rowid_list)
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/server/start/posix/', methods=['GET'])
def get_review_posix_server_start_time(ibs, review_rowid_list):
review_posix_time_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_TIME_SERVER_START,), review_rowid_list)
return review_posix_time_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/client/start/posix/', methods=['GET'])
def get_review_posix_client_start_time(ibs, review_rowid_list):
review_posix_time_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_TIME_CLIENT_START,), review_rowid_list)
return review_posix_time_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/client/end/posix/', methods=['GET'])
def get_review_posix_client_end_time(ibs, review_rowid_list):
review_posix_time_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_TIME_CLIENT_END,), review_rowid_list)
return review_posix_time_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/server/end/posix/', methods=['GET'])
def get_review_posix_server_end_time(ibs, review_rowid_list):
review_posix_time_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_TIME_SERVER_END,), review_rowid_list)
return review_posix_time_list
@register_ibs_method
@accessor_decors.getter_1to1
def get_review_aid_tuple(ibs, review_rowid_list, eager=True, nInput=None):
colnames = (REVIEW_AID1, REVIEW_AID2,)
params_iter = zip(review_rowid_list)
where_colnames = [REVIEW_ROWID]
aid_tuple_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput)
return aid_tuple_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/time/posix/tuple/', methods=['GET'])
def get_review_posix_times_from_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): identity_list - review posix times
RESTful:
Method: GET
URL: /api/review/time/posix/tuple/
"""
colnames = (REVIEW_TIME_SERVER_END,)
params_iter = zip(aid_1_list, aid_2_list)
where_colnames = [REVIEW_AID1, REVIEW_AID2]
review_posix_times_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput, unpack_scalars=False)
return review_posix_times_list
# def _parse_tag_str(tag_str):
# if tag_str is None or len(tag_str) == 0:
# return None
# else:
# return tag_str.split(';')
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/tags/', methods=['GET'], __api_plural_check__=False)
def get_review_tags(ibs, review_rowid_list):
review_tag_str_list = ibs.staging.get(const.REVIEW_TABLE, (REVIEW_TAGS,), review_rowid_list)
review_tags_list = [
None if review_tag_str is None or len(review_tag_str) == 0 else review_tag_str.split(';')
for review_tag_str in review_tag_str_list
]
return review_tags_list
@register_ibs_method
@accessor_decors.getter_1to1
@register_api('/api/review/tags/tuple/', methods=['GET'], __api_plural_check__=False)
def get_review_tags_from_tuple(ibs, aid_1_list, aid_2_list, eager=True, nInput=None):
r"""
Returns:
list_ (list): review_tags_list - review tags (list of strings)
RESTful:
Method: GET
URL: /api/review/tags/tuple/
"""
colnames = (REVIEW_TAGS,)
params_iter = zip(aid_1_list, aid_2_list)
where_colnames = [REVIEW_AID1, REVIEW_AID2]
review_tag_strs_list = ibs.staging.get_where_eq(
const.REVIEW_TABLE, colnames, params_iter, where_colnames,
eager=eager, nInput=nInput, unpack_scalars=False)
review_tags_list = [
[]
if review_tag_str_list is None else
[
None if review_tag_str is None or len(review_tag_str) == 0 else review_tag_str.split(';')
for review_tag_str in review_tag_str_list
]
for review_tag_str_list in review_tag_strs_list
]
return review_tags_list
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.control.manual_review_funcs
python -m ibeis.control.manual_review_funcs --allexamples
python -m ibeis.control.manual_review_funcs --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Hendrik Volkmer, Thijs Metsch
# Copyright (c) 2013 Daniele Stroppa
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the SmartOS VM management system (vmadm)
"""
from oslo.config import cfg
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import jsonutils
from nova.virt import driver
from nova.virt.smartosapi import vmops
import socket
LOG = logging.getLogger(__name__)
smartos_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('smartos_type',
default='zone',
help='smartos domain type (valid options are: '
'kvm, zone)'),
cfg.StrOpt('smartos_uri',
default='',
help='Override the default smartos URI '
'(which is dependent on smartos_type)'),
cfg.BoolOpt('smartos_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('smartos_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('smartos_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('smartos_vif_driver',
default='nova.virt.smartos.vif.smartosGenericVIFDriver',
help='The smartos VIF driver to configure the VIFs.'),
cfg.ListOpt('smartos_volume_drivers',
default=[
'iscsi=nova.virt.smartos.volume.smartosISCSIVolumeDriver',
'local=nova.virt.smartos.volume.smartosVolumeDriver',
'fake=nova.virt.smartos.volume.smartosFakeVolumeDriver',
'rbd=nova.virt.smartos.volume.smartosNetVolumeDriver',
'sheepdog=nova.virt.smartos.volume.smartosNetVolumeDriver',
'nfs=nova.virt.smartos.volume.smartosNFSVolumeDriver',
'aoe=nova.virt.smartos.volume.smartosAOEVolumeDriver',
'glusterfs='
'nova.virt.smartos.volume.smartosGlusterfsVolumeDriver',
'fibre_channel=nova.virt.smartos.volume.'
'smartosFibreChannelVolumeDriver',
'scality='
'nova.virt.smartos.volume.smartosScalityVolumeDriver',
],
help='smartos handlers for remote volumes.'),
cfg.StrOpt('smartos_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on smartos_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('smartos_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('smartos_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' smartos calls'),
cfg.StrOpt('smartos_snapshots_directory',
default='$instances_path/snapshots',
help='Location where smartos driver will store snapshots '
'before uploading them to image service'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
]
CONF = cfg.CONF
CONF.register_opts(smartos_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
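# Once registered, these options are available as attributes on CONF,
# e.g. CONF.smartos_type or CONF.live_migration_bandwidth, and can be
# overridden in nova.conf like any other oslo.config option.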
class Failure(Exception):
"""Base Exception class for handling task failures."""
def __init__(self, details):
self.details = details
def __str__(self):
return str(self.details)
def get_connection(_read_only):
"""Sets up the smartOS connection."""
return SmartOSDriver(None)
class SmartOSDriver(driver.ComputeDriver):
"""The smartOS host connection object."""
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(SmartOSDriver, self).__init__(virtapi)
self._host_state = None
self.read_only = read_only
self._vmops = vmops.SmartOSOps()
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def list_instance_uuids(self):
"""
Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
return self._vmops.list_instances_uuids()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, network_info)
def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance."""
LOG.error("HXO: instance %s" % repr(instance))
self._vmops.destroy(instance, network_info)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_info(instance)
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
"""Return volume connector information"""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn.
# return {
# 'ip': FLAGS.vmwareapi_host_ip,
# 'initiator': None
# }
raise NotImplementedError()
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance."""
pass
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
pass
def get_disk_available_least(self):
return 100
#def update_available_resource(self, ctxt, host):
# """Updates compute manager resource info on ComputeNode table.
# This method is called as an periodic tasks and is used only
# in live migration currently.
# :param ctxt: security context
# :param host: hostname that compute manager is currently running
# """
#
# try:
# service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
# except exception.NotFound:
# raise exception.ComputeServiceUnavailable(host=host)
#
# # Updating host information
# dic = {'vcpus': self.get_vcpu_total(),
# 'memory_mb': self.get_memory_mb_total(),
# 'local_gb': self.get_local_gb_total(),
# 'vcpus_used': self.get_vcpu_used(),
# 'memory_mb_used': self.get_memory_mb_used(),
# 'local_gb_used': self.get_local_gb_used(),
# 'hypervisor_type': self.get_hypervisor_type(),
# 'hypervisor_version': self.get_hypervisor_version(),
# 'hypervisor_hostname': self.get_hypervisor_hostname(),
# 'cpu_info': self.get_cpu_info(),
# 'service_id': service_ref['id'],
# 'disk_available_least': self.get_disk_available_least()}
#
# compute_node_ref = service_ref['compute_node']
# if not compute_node_ref:
# LOG.info(_('Compute_service record created for %s ') % host)
# db.compute_node_create(ctxt, dic)
# else:
# LOG.info(_('Compute_service record updated for %s ') % host)
# db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': self.get_local_gb_total(),
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': self.get_local_gb_used(),
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': self.get_disk_available_least()}
return dic
def get_hypervisor_hostname(self):
return socket.gethostname()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
raise NotImplementedError()
def update_host_status(self):
"""Refresh host stats"""
return self.host_state.update_status()
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self.read_only)
return self._host_state
def get_host_stats(self, refresh=False):
"""Return currently known host stats"""
return self.host_state.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
@staticmethod
def get_vcpu_total():
# Use psrinfo
return 10
@staticmethod
def get_vcpu_used():
# vmadm list -o vcpus
return 0
@staticmethod
def get_cpu_info():
cpu_info = dict()
cpu_info['arch'] = "x86_64"
cpu_info['model'] = "Xeon"
cpu_info['vendor'] = "Intel"
topology = dict()
topology['sockets'] = 1
topology['cores'] = 2
topology['threads'] = 4
cpu_info['topology'] = topology
features = list()
features.append("sse")
cpu_info['features'] = features
guest_arches = list()
guest_arches.append("i386")
guest_arches.append("x86_64")
cpu_info['permitted_instance_types'] = guest_arches
# TODO: See smartos/driver.py:2149
return jsonutils.dumps(cpu_info)
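# For reference, the JSON produced above looks roughly like:
#   {"arch": "x86_64", "model": "Xeon", "vendor": "Intel",
#    "topology": {"sockets": 1, "cores": 2, "threads": 4},
#    "features": ["sse"], "permitted_instance_types": ["i386", "x86_64"]}
# (key order may vary; the values are the hard-coded placeholders above).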
@staticmethod
def get_memory_mb_total():
# prtconf |grep -i mem
return 12000
@staticmethod
def get_memory_mb_used():
# echo ::memstat | mdb -k
return 0
@staticmethod
def get_local_gb_used():
# zpool list -p zones
return 0
@staticmethod
def get_local_gb_total():
return 20
@staticmethod
def get_hypervisor_type():
return "kvm"
@staticmethod
def get_hypervisor_version():
return 1
class HostState(object):
"""Manages information about the compute node through smartos"""
def __init__(self, read_only):
super(HostState, self).__init__()
self.read_only = read_only
self._stats = {}
self.connection = None
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first."""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from smartos."""
LOG.debug(_("Updating host stats"))
if self.connection is None:
self.connection = get_connection(self.read_only)
data = {"vcpus": self.connection.get_vcpu_total(),
"vcpus_used": self.connection.get_vcpu_used(),
"cpu_info": jsonutils.loads(self.connection.get_cpu_info()),
"disk_total": self.connection.get_local_gb_total(),
"disk_used": self.connection.get_local_gb_used()}
data["disk_available"] = data["disk_total"] - data["disk_used"]
data["host_memory_total"] = self.connection.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.connection.get_memory_mb_used())
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()
data["hypervisor_hostname"] = self.connection.get_hypervisor_hostname()
self._stats = data
return data
|
|
import json
import time
import requests
import logging
from datetime import datetime
from urllib.parse import urljoin
from .exceptions import AuthenticationError, ResourceNotFoundError, InternalServerError, ConnectionError
JSON_HTTP_HEADERS = {'content-type': 'application/json'}
class RequestsMixin:
def __init__(self):
self.entity_id = ""
self.api_key = ""
self.session = requests.Session()
self.connected = False
self.set_hpit_root_url('https://www.hpit-project.org')
self.set_requests_log_level('debug')
self._add_hooks('pre_connect', 'post_connect', 'pre_disconnect', 'post_disconnect')
def set_hpit_root_url(self, root_url):
self._hpit_root_url = root_url
def set_requests_log_level(self, log_level):
requests_log = logging.getLogger("requests")
if log_level == 'warning':
requests_log.setLevel(logging.WARNING)
elif log_level == 'debug':
requests_log.setLevel(logging.DEBUG)
elif log_level == 'info':
requests_log.setLevel(logging.INFO)
self._requests_log_level = log_level
def connect(self, retry=True):
"""
Register a connection with the HPIT Server.
This essentially sets up a session and logs that you are actively using
the system. This is mostly used to track plugin use with the site.
"""
self._try_hook('pre_connect')
self._post_data('connect', {
'entity_id': self.entity_id,
'api_key': self.api_key
}, retry=retry
)
self.connected = True
self._try_hook('post_connect')
return self.connected
def disconnect(self, retry=True):
"""
Tells the HPIT Server that you are not currently going to poll
the server for messages or responses. This also destroys the current session
with the HPIT server.
"""
self._try_hook('pre_disconnect')
self._post_data('disconnect', {
'entity_id': self.entity_id,
'api_key': self.api_key
}, retry=retry
)
self.connected = False
self._try_hook('post_disconnect')
return self.connected
def _post_data(self, url, data=None, retry=True):
"""
Sends arbitrary data to the HPIT server. This is mainly a thin
wrapper on top of requests that ensures we are using sessions properly.
Returns: requests.Response : class - The response from HPIT. Normally a 200:OK.
"""
url = urljoin(self._hpit_root_url, url)
failure_count = 0
while failure_count < 3:
try:
if data:
response = self.session.post(url, data=json.dumps(data), headers=JSON_HTTP_HEADERS)
else:
response = self.session.post(url)
if response is None:
raise ConnectionError("Connection was reset by a peer or the server rebooted.")
if response.status_code == 200:
return response
elif response.status_code == 403:
raise AuthenticationError("Request could not be authenticated")
elif response.status_code == 404:
raise ResourceNotFoundError("Requested resource not found")
elif response.status_code == 500:
raise InternalServerError("Internal server error")
return response
except requests.exceptions.ConnectionError:
if failure_count == 3:
raise ConnectionError("Could not connect to server. Tried 3 times.")
failure_count += 1
continue
#It looks like the server went down. Wait 5 minutes and try again
if retry:
return self._attempt_reconnection(lambda: self._post_data(url, data))
raise ConnectionError("Connection was reset by a peer or the server stopped responding.")
def _get_data(self, url, retry=True):
"""
Gets arbitrary data from the HPIT server. This is mainly a thin
wrapper on top of requests that ensures we are using sessions properly.
Returns: dict() - A Python dictionary representing the JSON received in the request.
"""
url = urljoin(self._hpit_root_url, url)
failure_count = 0
while failure_count < 3:
try:
response = self.session.get(url)
if response is None:
raise ConnectionError("Connection was reset by a peer or the server rebooted.")
if response.status_code == 200:
return response.json()
elif response.status_code == 403:
raise AuthenticationError("Request could not be authenticated")
elif response.status_code == 404:
raise ResourceNotFoundError("Requested resource not found")
elif response.status_code == 500:
raise InternalServerError("Internal server error")
return response
except requests.exceptions.ConnectionError as e:
if failure_count == 3:
raise e
failure_count += 1
continue
#It looks like the server went down. Wait 5 minutes and try again
if retry:
return self._attempt_reconnection(lambda: self._get_data(url))
raise ConnectionError("Connection was reset by a peer or the server stopped responding.")
def _attempt_reconnection(self, callback):
self.connected = False
print("Looks like the server went down. Waiting 5 minutes...")
failure_count = 0
while failure_count < 3:
time.sleep(300)
#Just hit the front page
response = self.session.get(self._hpit_root_url)
if response and response.status_code == 200:
print("Server looks like it finished rebooting... attempting reconnect.")
try:
self.connect(retry=False)
except Exception:  # Still having problems
failure_count += 1
continue
print("Successfully reconnected... continuing as normal")
return callback()
else:
failure_count += 1
raise ConnectionError("Could not reconnect to the server. Shutting down.")
def send_log_entry(self, text):
"""
Send a log entry to the HPIT server.
"""
self._post_data("log", data={'log_entry':text})
#Log to file if a logger variable is set on this class instance
logger = getattr(self, 'logger', None)
if logger:
logger.debug(text)
def _add_hooks(self, *hooks):
"""
Adds hooks to this class. If the function is already defined, this leaves that definition. If
it doesn't exist, the hook is created and set to None.
"""
for hook in hooks:
if not hasattr(self, hook):
setattr(self, hook, None)
def _try_hook(self, hook_name):
"""
Tries to call a signal hook. Hooks take no parameters and return a boolean result.
True will cause the plugin to continue execution.
False will cause the plugin to stop execution.
"""
hook = getattr(self, hook_name, None)
if hook:
return hook()
else:
return True
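# Illustrative usage (a minimal sketch; MyPlugin and the credential values are
# hypothetical and not part of this module):
#   class MyPlugin(RequestsMixin):
#       def __init__(self, entity_id, api_key):
#           super().__init__()
#           self.entity_id = entity_id
#           self.api_key = api_key
#   plugin = MyPlugin('my-entity-id', 'my-api-key')
#   if plugin.connect():
#       plugin.send_log_entry('plugin started')
#       plugin.disconnect()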
|
|
#!/home/ubuntu/anaconda2/envs/tensorflow/bin/python
# MIT License
# Copyright (c) 2016 Druce Vertes [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pull requests gratefully accepted.
# possible improvements
# - run fast on GPU. currently pin to device("/cpu:0") which runs ~5x faster
# - optimizer with adaptive learning rate and momentum - AdamOptimizer gives error
from __future__ import print_function
import argparse
import pickle
from time import strftime
import sys
import six
import random
import pdb
import tensorflow as tf
import numpy as np
import pandas as pd
import lifetable
import matplotlib.pyplot as plt
#%matplotlib inline
# TensorFlow numeric type to use for floating point variables
# tf.float32 is 2x faster but doesn't provide necessary accuracy
# tf.float64 will run out of accuracy for high gamma (> 8)
float_type = tf.float64
############################################################
# returns 1928-2015
############################################################
first_year = 1928
last_year = 2015
years = range(first_year, last_year+1) # pythonically yields [1928, 1929...2015]
years_history = len(years)
years_retired = 30
num_cohorts = years_history - years_retired + 1
num_assets = 2
#gamma = 1.0
sp500 = pd.Series([
0.4381,-0.083,-0.2512,-0.4384,-0.0864,0.4998,-0.0119,0.4674,0.3194,-0.3534,0.2928,-0.011,
-0.1067,-0.1277,0.1917,0.2506,0.1903,0.3582,-0.0843,0.052,0.057,0.183,0.3081,0.2368,0.1815,
-0.0121,0.5256,0.326,0.0744,-0.1046,0.4372,0.1206,0.0034,0.2664,-0.0881,0.2261,0.1642,0.124,
-0.0997,0.238,0.1081,-0.0824,0.0356,0.1422,0.1876,-0.1431,-0.259,0.37,0.2383,-0.0698,0.0651,
0.1852,0.3174,-0.047,0.2042,0.2234,0.0615,0.3124,0.1849,0.0581,0.1654,0.3148,-0.0306,0.3023,
0.0749,0.0997,0.0133,0.372,0.2268,0.331,0.2834,0.2089,-0.0903,-0.1185,-0.2197,0.2836,0.1074,
0.0483,0.1561,0.0548,-0.3655,0.2594,0.1482,0.021,0.1589,0.3215,0.1352,0.0136],
index = years)
bonds=pd.Series([
0.0084,0.042,0.0454,-0.0256,0.0879,0.0186,0.0796,0.0447,0.0502,0.0138,0.0421,0.0441,
0.054,-0.0202,0.0229,0.0249,0.0258,0.038,0.0313,0.0092,0.0195,0.0466,0.0043,-0.003,
0.0227,0.0414,0.0329,-0.0134,-0.0226,0.068,-0.021,-0.0265,0.1164,0.0206,0.0569,0.0168,
0.0373,0.0072,0.0291,-0.0158,0.0327,-0.0501,0.1675,0.0979,0.0282,0.0366,0.0199,0.0361,
0.1598,0.0129,-0.0078,0.0067,-0.0299,0.082,0.3281,0.032,0.1373,0.2571,0.2428,-0.0496,
0.0822,0.1769,0.0624,0.15,0.0936,0.1421,-0.0804,0.2348,0.0143,0.0994,0.1492,-0.0825,
0.1666,0.0557,0.1512,0.0038,0.0449,0.0287,0.0196,0.1021,0.201,-0.1112,0.0846,0.1604,
0.0297,-0.091,0.1075,0.0128],
index=years)
cpi=pd.Series([
-0.0115607,0.005848,-0.0639535,-0.0931677,-0.1027397,0.0076336,0.0151515,0.0298507,
0.0144928,0.0285714,-0.0277778,0,0.0071429,0.0992908,0.0903226,0.0295858,0.0229885,
0.0224719,0.1813187,0.0883721,0.0299145,-0.0207469,0.059322,0.06,0.0075472,0.0074906,
-0.0074349,0.0037453,0.0298507,0.0289855,0.0176056,0.017301,0.0136054,0.0067114,0.0133333,
0.0164474,0.0097087,0.0192308,0.0345912,0.0303951,0.0471976,0.0619718,0.0557029,0.0326633,
0.0340633,0.0870588,0.1233766,0.0693642,0.0486486,0.0670103,0.0901771,0.1329394,0.125163,
0.0892236,0.0382979,0.0379098,0.0394867,0.0379867,0.010979,0.0443439,0.0441941,0.046473,
0.0610626,0.0306428,0.0290065,0.0274841,0.026749,0.0253841,0.0332248,0.017024,0.016119,
0.0268456,0.0338681,0.0155172,0.0237691,0.0187949,0.0325556,0.0341566,0.0254065,0.0408127,
0.0009141,0.0272133,0.0149572,0.0296,0.0174,0.015,0.0076,0.0073],
index=years)
# Default start
# 50/50 allocations
stock_allocations = pd.Series(np.ones(years_retired) * 0.5)
bond_allocations = 1 - stock_allocations
# 2% each const and var spending
startval = 100
const_spend_pct = .02
var_spend_pcts = pd.Series(np.ones(years_retired) * 0.02)
real_stocks = sp500 - cpi
real_bonds = bonds - cpi
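# Note: subtracting CPI from the nominal return is a first-order approximation
# of the real return; the exact figure is (1 + nominal) / (1 + cpi) - 1, and
# the two agree to first order in the rates.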
############################################################
# generate a life table for a 65 year-old male retiree
# a 30-year retirement
# not used in this notebook example
############################################################
lt = lifetable.genLifetable(lifetable.MlivesSeries, lifetable.MLEseries, 65, 30)
#print(lt)
survival = np.array(lt.survival)
#print(survival)
#print(survival.shape)
class SafeWithdrawalModel:
"""initialize graph and parameters shared by all retirement cohorts"""
def __init__(self,
returns_list, # series with returns for assets
names_list, # names of assets
allocations_list, # list of % allocated to each asset class
start_val, # starting portfolio value e.g. 100
const_spend_pct,
var_spend_pcts,
gamma,
survival,
verbose=False):
# read params, initialize Tensorflow graph and session
# set up ops specific to model
self.verbose=verbose
self.startval = start_val
self.returns_list = returns_list
self.names_list = names_list
self.num_assets = len(self.names_list)
self.start_val = start_val
self.ret_years = len(allocations_list[0])
self.const_spend_pct = const_spend_pct
self.var_spend_pcts = var_spend_pcts
self.survival=survival
self.gamma = gamma
# model will have a cohort_history object, optimizer object
# initialize with placeholder, needs rest of model initialized first
self.cohort_history = None
self.optimizer = None
self.first_year = returns_list[0].index[0]
self.last_year = returns_list[0].index[-1]
self.total_cohorts = len(returns_list[0])
self.ret_cohorts = self.total_cohorts - self.ret_years + 1
print('%s Create TensorFlow graph and session' % strftime("%H:%M:%S"))
self.graph = tf.Graph()
self.sess = tf.Session(graph = self.graph)
self.return_ops = []
self.allocation_ops = []
with self.graph.as_default():
with tf.device("/cpu:0"):
# some constants
self.zero = tf.constant(0.0, dtype=float_type, name="zero")
self.one = tf.constant(1.0, dtype=float_type, name="one")
self.one_hundred = tf.constant(100.0, dtype=float_type, name="one_hundred")
self.ten_thousand = tf.constant(10000.0, dtype=float_type, name="ten_thousand")
self.one_hundred_thousand = tf.constant(100000.0, dtype=float_type, name="one_hundred_thousand")
self.one_million = tf.constant(1000000.0, dtype=float_type, name="one_million")
self.very_small_amts = tf.constant(np.array([0.000001] * self.ret_years),
dtype=float_type, name="very_small_amts")
self.zero_years = tf.constant(np.zeros(self.ret_years),
dtype=float_type, name = "zero_years")
self.one_years = tf.constant(np.ones(self.ret_years), dtype=float_type, name="one_years")
self.ret_years_op = tf.constant(self.ret_years, dtype=float_type, name="ret_years")
#gamma
self.gamma_op = tf.constant(gamma, dtype=float_type, name="gamma")
self.one_minus_gamma = tf.sub(self.one, self.gamma, name="one_minus_gamma")
self.inv_one_minus_gamma = tf.div(self.one, self.one_minus_gamma,
name="inv_one_minus_gamma")
self.cost_multiplier = self.ten_thousand
# generate op for start_val
self.start_val_op = tf.constant(100.0, dtype=float_type, name ="port_start_val")
# generate ops for returns
for prefix, return_series in zip(names_list, returns_list):
self.return_ops.append(self.gen_tf_const_list(return_series, "%s_return" % prefix,
verbose=self.verbose))
# only implemented for n=2 assets
# generate ops for allocations for first n-1 assets
prefix = names_list[0]
alloc_series = allocations_list[0]
stock_alloc_ops = self.gen_tf_var_list(alloc_series, "%s_alloc" % prefix,
verbose=self.verbose)
self.allocation_ops.append(stock_alloc_ops)
# ops for soft constraints: 0 < stock allocation < 1
self.alloc_min_0_ops = self.gen_zero_min_list(stock_alloc_ops, "alloc_min_0",
verbose=self.verbose)
self.cost_alloc_min_0_op = tf.mul(self.cost_multiplier,
tf.add_n(self.alloc_min_0_ops,
name="cost_alloc_min_0"),
name="cost_alloc_min_0_mult")
self.alloc_max_1_ops = self.gen_one_max_list(stock_alloc_ops, "alloc_max_1",
verbose=self.verbose)
self.cost_alloc_max_1_op = tf.mul(self.cost_multiplier,
tf.add_n(self.alloc_max_1_ops, name = "cost_alloc_max_1"))
# ops for soft constraints: declining stock allocation
# why? for example, 1966 is the worst cohort, and 1974 is its worst stock return (-40%)
# to maximize CE, optimization sets stock allocation at a minimum to not run out of money
# in worst year. It will go e.g. 80% stock alloc in year 8 and 56% in year 9, return to
# 80% in year 10. To avoid artifacts like that, knowing stock allocation should decline
# over time, we add a large penalty to objective when stock allocation increases
# from one year to next.
self.alloc_decrease_ops = self.gen_diff_list(stock_alloc_ops, "alloc_decrease",
verbose=self.verbose)
self.cost_alloc_decrease_op = tf.mul(self.cost_multiplier,
tf.add_n(self.alloc_decrease_ops,
name="alloc_decrease_cost_op"))
# last asset is 1-previous assets
bond_alloc_ops = []
var_prefix = "%s_alloc" % names_list[1]
print ('%s Create ops for %s' % (strftime("%H:%M:%S"), var_prefix))
for ix, op in enumerate(stock_alloc_ops):
var_name = "%s_%d" % (var_prefix, ix)
if self.verbose:
print('Create %s' % var_name)
var_op = tf.sub(self.one, stock_alloc_ops[ix], name=var_name)
bond_alloc_ops.append(var_op)
self.allocation_ops.append(bond_alloc_ops)
# generate ops for const, var spending
self.const_spend_pct_op = tf.Variable(const_spend_pct, dtype=float_type, name="const_spend_pct")
self.sess.run(self.const_spend_pct_op.initializer)
self.const_spending_op = tf.mul(self.const_spend_pct_op, self.one_hundred, name="const_spend")
self.var_spending_ops = self.gen_tf_var_list(self.var_spend_pcts, "var_spend",
verbose=self.verbose)
# all ops to be trained
self.all_var_ops = [self.const_spend_pct_op] + \
self.var_spending_ops + \
self.allocation_ops[0]
# op for soft constraint: const spending > 0
self.cspend_min_0_op = tf.maximum(self.zero, tf.neg(self.const_spend_pct_op,
name="neg_cspend_min_0_op"),
name="cspend_min_0_op")
self.cost_cspend_min_0_op = tf.mul(self.cost_multiplier,
self.cspend_min_0_op,
name="cost_cspend_min_0")
# op for soft constraint: var spending > 0
self.vspend_min_0_ops = self.gen_zero_min_list(self.var_spending_ops, "vspend_min_0",
verbose=self.verbose)
self.cost_vspend_min_0_op = tf.mul(self.cost_multiplier,
tf.add_n(self.vspend_min_0_ops,
name="cost_vspend_min_0"))
if survival is not None:
survival_array=np.array(survival)
self.survival_tensor = tf.constant(survival_array, dtype=float_type,
name="survival_tensor")
# global step counter
self.step_count = tf.Variable(0, dtype=float_type, name="step_count", trainable=False)
self.increment_step = self.step_count.assign_add(1)
#init op
self.init_op = tf.initialize_all_variables()
def __del__(self):
"""When deleting model, close session, clear default graph"""
print("Destructor reset graph")
try:
with self.graph.as_default():
tf.reset_default_graph()
except Exception, e:
print ("Destructor couldn't reset graph: %s" % str(e))
try:
print ("Destructor close Tensorflow session")
self.sess.close()
except Exception, e:
print ("Destructor couldn't close session: %s" % str(e))
def gen_tf_const_list(self, const_iter, const_prefix, start_index=0, verbose=False):
"""take a list or iterator of values, generate and return tensorflow constant ops for each"""
print ('%s Create constants %s' % (strftime("%H:%M:%S"), const_prefix))
with self.graph.as_default():
with tf.device("/cpu:0"):
const_list = []
for ix, const in enumerate(const_iter):
const_name = "%s_%d" % (const_prefix, start_index + ix)
if verbose:
print("Set constant %s to %f" % (const_name, const))
const_list.append(tf.constant(const, dtype=float_type, name=const_name))
return const_list
def gen_tf_var_list(self, var_iter, var_prefix, start_index=0, verbose=False):
"""take a list or iterator of values, generate and return tensorflow Variable ops for each"""
print ('%s Create variables %s' % (strftime("%H:%M:%S"), var_prefix))
with self.graph.as_default():
with tf.device("/cpu:0"):
var_list = []
for ix, var in enumerate(var_iter):
var_name = "%s_%d" % (var_prefix, start_index + ix)
if verbose:
print("Create variable %s to %f" % (var_name, var))
var_op = tf.Variable(var, dtype=float_type, name=var_name)
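# run each Variable's initializer as soon as it is created, so ops built from it
# (e.g. the verbose CE printouts) can be evaluated before the global init op exists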
self.sess.run(var_op.initializer)
var_list.append(var_op)
return var_list
def get_op_from_list(self, op_list, op_index):
"""take a list of ops, return value of op specified by op_index"""
op = op_list[op_index]
retval = self.sess.run([op])
return retval
def gen_zero_min_list(self, op_iter, op_prefix, start_index=0, verbose=False):
"""take a list or iterator of ops, generate and return an op which is max(-op, 0)
for soft constraints > 0"""
print ('%s Create ops for soft constraint %s > 0' % (strftime("%H:%M:%S"), op_prefix))
with self.graph.as_default():
with tf.device("/cpu:0"):
op_list = []
for ix, op in enumerate(op_iter):
op_name = "%s_%d" % (op_prefix, start_index + ix)
if verbose:
print("Zero_min op %s" % op_name)
new_op = tf.maximum(self.zero, tf.neg(op, name="neg_%s" % op_name), name=op_name)
op_list.append(new_op)
return op_list
def gen_one_max_list(self, op_iter, op_prefix, start_index=0, verbose=False):
"""take a list or iterator of ops, generate and return an op with is max(op-1, 0)
for soft constraints > 0"""
print ('%s Create ops for soft constraint %s < 1' % (strftime("%H:%M:%S"), op_prefix))
with self.graph.as_default():
with tf.device("/cpu:0"):
op_list = []
for ix, op in enumerate(op_iter):
op_name = "%s_%d" % (op_prefix, start_index + ix)
if verbose:
print('One_max op %s' % op_name)
new_op = tf.maximum(self.zero, tf.sub(op, self.one, name="one_minus_%s" % op_name),
name=op_name)
op_list.append(new_op)
return op_list
def gen_diff_list(self, op_iter, op_prefix, start_index=0, verbose=False):
"""generate and return an op for declining stock alloc constraint over time, max of 0 and decrease"""
print ('%s Create ops for soft constraint, declining stock alloc %s' % (strftime("%H:%M:%S"),
op_prefix))
with self.graph.as_default():
with tf.device("/cpu:0"):
op_list = []
for ix, op in enumerate(op_iter):
if ix == 0:
continue
op_name = "%s_%d" % (op_prefix, start_index + ix)
if verbose:
print("diff op %s" % op_name)
new_op = tf.maximum(self.zero, tf.sub(op_iter[ix], op_iter[ix-1]))
op_list.append(new_op)
return op_list
def gen_ce(self, input_tensor, prefix, survival_tensor=None, verbose=False):
with tf.device("/cpu:0"):
with self.graph.as_default():
input_length = np.float64(input_tensor.get_shape().as_list()[0])
if verbose:
print("%s Create ce op with gamma: %f" % (strftime("%H:%M:%S"), self.gamma))
if self.gamma == 1.0:
u = tf.reduce_mean(tf.log(input_tensor), name="%s_u" % prefix)
#print(self.sess.run(u))
if survival_tensor is not None:
u0 = u
u = tf.reduce_mean(tf.mul(u0, survival_tensor, name="%s_u_surv" % prefix),
name="%s_u" % prefix)
ce = tf.exp(u, name="%s_ce" % prefix)
if verbose:
print ('%s Create CE op %f' % (strftime("%H:%M:%S"), self.sess.run(ce)))
else:
# for high gamma numerical error is significant, calculation is most accurate near 1
# so divide by mean
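# i.e. with m = mean(x):  u  = mean(((x/m)^(1-gamma) - 1) / (1-gamma))
#                         CE = m * (1 + (1-gamma)*u)^(1/(1-gamma))
# which is the usual CRRA certainty equivalent, just computed around m for stability.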
input_mean = tf.reduce_mean(input_tensor, name="%s_mean" % prefix)
input_conditioned = tf.div(input_tensor, input_mean, name="%s_conditioned" % prefix)
u1 = tf.pow(input_conditioned, self.one_minus_gamma, name="%s_u1" % prefix)
u2 = tf.sub(u1, self.one, name="%s_u2" % prefix)
u3 = tf.mul(u2, self.inv_one_minus_gamma, name="%s_u3" % prefix)
u = tf.reduce_mean(u3, name="%s_u" % prefix)
if survival_tensor is not None:
u4 = u
u = tf.reduce_mean(tf.mul(u4, survival_tensor, name="%s_u_surv" % prefix),
name="%s_u" % prefix)
ce1 = tf.mul(self.one_minus_gamma, u, name="%s_ce1" % prefix)
ce2 = tf.add(ce1, self.one, name="%s_ce2" % prefix)
ce3 = tf.pow(ce2, self.inv_one_minus_gamma, name="%s_ce3" % prefix)
ce = tf.mul(input_mean, ce3, name="%s_ce" % prefix)
if verbose:
print ('%s Create CE op %f' % (strftime("%H:%M:%S"), self.sess.run(ce)))
return ce
def dump_json(self):
# output each series for chart
df_summary = self.cohort_history.summarize_by_year()
df_years = self.cohort_history.spend_by_year()
plotly_name = "plotly_data_%02.0f" % self.gamma
f = open('%s.json' % plotly_name,'w')
f.write ("%s = [" % plotly_name)
# indiv years
for ix in range(self.first_year, self.first_year + self.ret_cohorts):
print(ix)
f.write ("%s," % str(list(df_years[str(ix)])))
# mean, min, max etc.
f.write ("%s,\n" % str(list(df_summary.spend_mean)))
f.write ("%s,\n" % str(list(df_summary.spend_max)))
f.write ("%s,\n" % str(list(df_summary.spend_min)))
f.write ("%s,\n" % str(list(df_summary.spend_mean - df_summary.spend_sd)))
f.write ("%s,\n" % str(list(df_summary.spend_mean + df_summary.spend_sd)))
f.write ("];\n")
f.close()
class Cohort:
"""Cohort represents experience of an individual
- retiring in a given year
- using the specified SafeWithdrawal model"""
def __init__(self, model, cohort_start_year):
self.model = model
self.cohort_start_year = cohort_start_year
self.name = "cohort_%d" % cohort_start_year
self.gen_tf_ops()
def gen_tf_ops(self, verbose=False):
if verbose:
print("%s Instantiating cohort %s" % (strftime("%H:%M:%S"), self.name))
stock_returns = self.model.return_ops[0]
bond_returns = self.model.return_ops[1]
stock_allocs = self.model.allocation_ops[0]
bond_allocs = self.model.allocation_ops[1]
self.port_returns_list = []
self.port_prespend_list = []
self.port_end_vals_list = []
self.spend_amts_list = []
self.spend_amts_nonzero_list = []
with self.model.graph.as_default():
with tf.device("/cpu:0"):
if verbose:
print ("%s Generating %d years from %d" % (strftime("%H:%M:%S"),
self.model.ret_years,
self.cohort_start_year))
start_year_ix = self.cohort_start_year - self.model.first_year
for ix in range(self.model.ret_years):
op_stock_return = stock_returns[start_year_ix + ix]
op_stock_alloc = stock_allocs[ix]
op_bond_return = bond_returns[start_year_ix + ix]
op_bond_alloc = bond_allocs[ix]
op_const_spend = self.model.const_spending_op
op_var_spend = self.model.var_spending_ops[ix]
op_total_real_return = tf.add(tf.mul(op_stock_alloc, op_stock_return, name="%s_stock_%d"
% (self.name, ix)),
tf.mul(op_bond_alloc, op_bond_return, name="%s_bond_%d"
% (self.name, ix)),
name="%s_total_return_%d" % (self.name, ix))
self.port_returns_list.append(op_total_real_return)
if ix == 0:
prev_val = self.model.start_val_op
else:
prev_val = self.port_end_vals_list[ix-1]
op_port_end_val_prespend = tf.add(prev_val,
tf.mul(prev_val, self.port_returns_list[ix],
name="%s_dolreturn_%d" % (self.name, ix)),
name="%s_prespend_%d" % (self.name, ix))
self.port_prespend_list.append(op_port_end_val_prespend)
desired_spend_amt = tf.add(tf.mul(op_var_spend, op_port_end_val_prespend,
name="%s_des_vspend_%d" % (self.name, ix)),
op_const_spend,
name="%s_desired_spend_amt_%d" % (self.name, ix))
#spend minimum of tmp_spend_amt, port value
spend_amt = tf.minimum(desired_spend_amt, op_port_end_val_prespend,
name="%s_actual_spend_amt_%d" % (self.name, ix))
self.spend_amts_list.append(spend_amt)
op_port_end_val = tf.sub(op_port_end_val_prespend, spend_amt, name="%s_endval_%d" %
(self.name, ix))
self.port_end_vals_list.append(op_port_end_val)
#now that we've computed cohort paths we pack results into 1D Tensors to calc objective
self.spend_amts = tf.pack(self.spend_amts_list, name="%s_spend_amts" % self.name)
self.port_end_vals = tf.pack(self.port_end_vals_list, name="%s_port_end_vals" % self.name)
self.mean_spending = tf.reduce_mean(self.spend_amts, name="%s_mean_spending" % self.name)
self.sd_spending = tf.sqrt(tf.reduce_mean(tf.pow(tf.sub(self.spend_amts,
self.mean_spending), 2)),
name="%s_sd_spending" % self.name)
self.min_spending = tf.reduce_min(self.spend_amts, name="%s_min_spending" % self.name)
self.max_spending = tf.reduce_max(self.spend_amts, name="%s_max_spending" % self.name)
if self.model.gamma == 1.0:
#spend a tiny amount even if spend is 0 so log is not NaN
#doesn't really seem like best practice but...
#0 spend years can't be in final solution
#and don't want divide by zero errors if optimizer attempts one
#chain new op off old op but keep a reference to old op around just in case
self.spend_amts_maybe_zero = self.spend_amts
self.spend_amts = tf.maximum(self.spend_amts_maybe_zero,
self.model.very_small_amts,
name="%s_actual_spend_nonzero" % self.name)
self.total_spending = tf.reduce_sum(self.spend_amts, name="%s_total_spending_nonzero" %
self.name)
else:
self.total_spending = tf.reduce_sum(self.spend_amts, name="%s_total_spending" %
self.name)
if self.model.survival is not None:
self.ce = self.model.gen_ce_survival(self.spend_amts,
self.model.survival_tensor,
"%s_ce" % self.name)
else:
self.ce = self.model.gen_ce(self.spend_amts,
"%s_ce" % self.name)
#print (self.as_dataframe())
def get_tf_ops(self):
return self.model.start_val, self.port_returns_list, self.port_prespend_list, \
self.spend_amts_list, self.port_end_vals_list, self.total_spending
def as_dataframe(self):
port_returns_ops = self.port_returns_list
port_prespend_ops = self.port_prespend_list
spend_amts_ops = self.spend_amts_list
port_end_vals_ops = self.port_end_vals_list
port_returns = self.model.sess.run(port_returns_ops)
port_prespend = self.model.sess.run(port_prespend_ops)
spend_amts = self.model.sess.run(spend_amts_ops)
port_end_vals = self.model.sess.run(port_end_vals_ops)
retlist = []
for ix in range(self.model.ret_years):
retlist.append([port_returns[ix],
port_prespend[ix],
spend_amts[ix],
port_end_vals[ix]
])
years = range(self.cohort_start_year, self.cohort_start_year+self.model.ret_years)
return pd.DataFrame(retlist,
index = years,
columns=['portreturn', 'prespend', 'spend_amt', 'end_val'])
class CohortHistory:
"""represents a set of cohorts retiring in different years using a strategy,
to enabling aggregating and summarizing their experiences"""
def __init__(self, model, cohort_years = None):
self.model = model
if cohort_years is None:
cohort_years = [year for year in range(self.model.first_year,
self.model.first_year + self.model.ret_cohorts)]
print('%s Create cohort history, years %d to %d' % (strftime("%H:%M:%S"),
cohort_years[0], cohort_years[-1]))
self.cohort_list = [Cohort(model, year) for year in cohort_years]
self.total_spending_ops = [cohort.total_spending for cohort in self.cohort_list]
def as_dataframe(self):
"""report on on each cohort by year, e.g. 1928"""
total_spending_ops = [cohort.total_spending for cohort in self.model.cohort_history.cohort_list]
mean_spending_ops = [cohort.mean_spending for cohort in self.model.cohort_history.cohort_list]
sd_spending_ops = [cohort.sd_spending for cohort in self.model.cohort_history.cohort_list]
min_spending_ops = [cohort.min_spending for cohort in self.model.cohort_history.cohort_list]
max_spending_ops = [cohort.max_spending for cohort in self.model.cohort_history.cohort_list]
ce_ops = [cohort.ce for cohort in self.model.cohort_history.cohort_list]
retlist = []
years = range(self.model.first_year, self.model.first_year + self.model.ret_cohorts)
for year, \
meanspend, \
sdspend, \
minspend, \
maxspend, \
totalspend, \
ce in zip(years, self.model.sess.run(mean_spending_ops),
self.model.sess.run(sd_spending_ops),
self.model.sess.run(min_spending_ops),
self.model.sess.run(max_spending_ops),
self.model.sess.run(total_spending_ops),
self.model.sess.run(ce_ops)):
retlist.append([meanspend, sdspend, minspend, maxspend, totalspend, ce])
return pd.DataFrame(retlist, index = years,
columns=['mean_spend', 'sd_spend', 'min_spend', 'max_spend', 'total_spend', 'ce'])
def spend_by_year(self):
"""report spending by year for each cohort (ret_years rows x num_cohorts)"""
dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
years = range(self.model.ret_years)
cohorts = range(len(dataframes))
retlist = []
for ix in years:
spendlist = [df.spend_amt.iloc[ix] for df in dataframes]
retlist.append(spendlist)
colnames = ["%d" % (cohort+self.model.first_year) for cohort in cohorts]
return pd.DataFrame(retlist, index = years, columns=colnames)
def returns_by_year(self):
"""report returns by year for each cohort (ret_years rows x num_cohorts)"""
dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
years = range(self.model.ret_years)
cohorts = range(len(dataframes))
retlist = []
for ix in years:
returnlist = [df.portreturn.iloc[ix] for df in dataframes]
retlist.append(returnlist)
colnames = ["%d" % (cohort+self.model.first_year) for cohort in cohorts]
return pd.DataFrame(retlist, index = years, columns=colnames)
def summarize_by_year(self):
"""report on outcomes by retirement year, e.g. retirement year 1, 2...30"""
dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
years = range(self.model.ret_years)
retlist = []
for ix in years:
spendlist = np.array([df.spend_amt.iloc[ix] for df in dataframes])
spend_mean = np.mean(spendlist)
spend_sd = np.std(spendlist)
spend_min = np.min(spendlist)
spend_max = np.max(spendlist)
retlist.append([spend_mean, spend_sd, spend_min, spend_max])
return pd.DataFrame(retlist, index = years,
columns=['spend_mean', 'spend_sd', 'spend_min', 'spend_max'])
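# Plain-NumPy cross-check of the certainty-equivalent math implemented by
# SafeWithdrawalModel.gen_ce above. This is a sketch for sanity-checking only;
# crra_certainty_equivalent is not part of the original model and is never called by it.
def crra_certainty_equivalent(cashflows, gamma):
    """Return the CRRA certainty-equivalent of a series of cashflows."""
    cashflows = np.asarray(cashflows, dtype=np.float64)
    if gamma == 1.0:
        # log utility: the certainty equivalent is the geometric mean
        return np.exp(np.mean(np.log(cashflows)))
    # condition on the mean for numerical stability, exactly as gen_ce does
    m = np.mean(cashflows)
    u = np.mean(((cashflows / m) ** (1.0 - gamma) - 1.0) / (1.0 - gamma))
    return m * (1.0 + (1.0 - gamma) * u) ** (1.0 / (1.0 - gamma))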
# Optimizer
# Create an op which is the sum of spending in all years
# - negate it so it will be minimized
# - add large penalty when a stock allocation is < 0 as a soft constraint
# - add large penalty when a stock allocation is > 1 as a soft constraint
# - add large penalty when const or var spending is < 0 as a soft constraint
# - result is an op which can be minimized by gradient descent
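# Schematically (a paraphrase of the ops built in __init__ below):
#   cost = -CE(all cohorts) + 10000 * (alloc<0 + alloc>1 + vspend<0 + cspend<0 + alloc increases)
# where 10000 is cost_multiplier, so any constraint violation dwarfs the CE term.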
class CohortHistoryOptimize():
def __init__(self, model):
self.model = model
self.best_objective = 0.0
self.best_step = 0
graph = self.model.graph
with graph.as_default():
with tf.device("/cpu:0"):
print ('%s Create optimizer class' % strftime("%H:%M:%S"))
print ('%s Run variable initializers' % strftime("%H:%M:%S"))
self.model.sess.run(model.init_op)
print('%s Create cost ops' % strftime("%H:%M:%S"))
print('%s Sum %d ce ops' % (strftime("%H:%M:%S"), len(self.model.cohort_history.cohort_list)))
ce_ops = [cohort.ce for cohort in self.model.cohort_history.cohort_list]
ce_tensor = tf.pack(ce_ops, name="all_cohorts_ce_tensor")
# ce over ret_cohorts years
self.total_ce_op = self.model.gen_ce(ce_tensor, "all_cohorts_ce")
print("%s Total CE spend, all cohorts: %f" % (strftime("%H:%M:%S"),
self.model.sess.run(self.total_ce_op)))
# basic cost
cost_op_1 = tf.neg(self.total_ce_op, name="basic_cost")
print("%s Raw cost objective: %f" % (strftime("%H:%M:%S"), self.model.sess.run(cost_op_1)))
cost_op_2 = tf.add(cost_op_1, model.cost_alloc_min_0_op, name="cost_add_alloc_min_0")
print("%s Add soft constraint penalty if stock alloc < 0: %f" %
(strftime("%H:%M:%S"), self.model.sess.run(cost_op_2)))
cost_op_3 = tf.add(cost_op_2, model.cost_alloc_max_1_op, name="cost_add_alloc_max_1")
print("%s Add soft constraint penalty if stock alloc > 1: %f" %
(strftime("%H:%M:%S"), self.model.sess.run(cost_op_3)))
cost_op_4 = tf.add(cost_op_3, model.cost_vspend_min_0_op, name="cost_vspend_min_0")
print("%s Add soft constraint penalty if var spending < 0: %f" %
(strftime("%H:%M:%S"), self.model.sess.run(cost_op_4)))
cost_op_5 = tf.add(cost_op_4, model.cost_cspend_min_0_op, name="cost_cspend_min_0")
print("%s Add soft constraint if const spending < 0: %f" %
(strftime("%H:%M:%S"), self.model.sess.run(cost_op_5)))
self.cost_op = tf.add(cost_op_5, model.cost_alloc_decrease_op, name="cost_alloc_decrease")
print("%s Add soft constraint if stock alloc increases in any year: %f" %
(strftime("%H:%M:%S"), self.model.sess.run(self.cost_op)))
self.best_objective = -self.model.sess.run(self.cost_op)
print("%s All inclusive objective to be minimized: %f" % (strftime("%H:%M:%S"),
-self.best_objective))
self.best_const_spend = self.model.sess.run(model.const_spend_pct_op)
self.best_var_spend = self.model.sess.run(model.var_spending_ops)
self.best_stock_alloc = self.model.sess.run(model.allocation_ops[0])
def run_step(self, report_steps=1):
"""run one step of optimizer
calc gradients
apply gradients * learning rate to each variable to descend gradient and improve objective
increment global step to remember how many steps we've run
if (hopefully) new objective is best to date, save params and objective"""
_, step = self.model.sess.run([self.optimize_step,
self.model.increment_step])
self.steps_ago +=1
cost = self.model.sess.run(self.cost_op)
assert not(np.isnan(cost)), "Objective is nan"
objective = - cost
#print objective each step
#print("objective %f best %f" %(objective, self.best_objective))
if np.isnan(cost):
sys.stdout.write('X')
sys.stdout.flush()
elif objective > self.best_objective:
self.best_objective = objective
self.best_const_spend = self.model.sess.run(self.model.const_spend_pct_op)
self.best_var_spend = self.model.sess.run(self.model.var_spending_ops)
self.best_stock_alloc = self.model.sess.run(self.model.allocation_ops[0])
self.best_step = step
self.steps_ago = 0
sys.stdout.write('!')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
if step % report_steps == 0:
sys.stdout.write("\n%s step %d objective %f best %f (%d steps ago)\n" %
(strftime("%H:%M:%S"),
step,
objective,
self.best_objective,
self.steps_ago))
# print variables optimized and gradients for debugging
# sys.stdout.write("\n")
# var_vals = self.model.sess.run(self.model.all_var_ops)
# print("%s Variables" % strftime("%H:%M:%S"))
# print(var_vals)
# grad_vals = self.model.sess.run([grad[0] for grad in self.grads])
# print("%s Gradients" % strftime("%H:%M:%S"))
# print(grad_vals)
sys.stdout.flush()
self.best_bond_alloc = pd.Series([1 - bsa for bsa in self.best_stock_alloc])
pickle_list = [self.best_const_spend, self.best_var_spend, self.best_stock_alloc,
self.best_bond_alloc]
pickle.dump( pickle_list, open( picklefile, "wb" ) )
# every 10 report_steps show current best
if step % (report_steps * 10) == 0:
print ("\n#Objective: %f\n" % self.best_objective)
print ("const_spend_pct = %.12f" % self.best_const_spend)
print ("var_spend_pcts = pd.Series(%s)" % str(self.best_var_spend))
print ("stock_allocations = pd.Series(%s)\n" %str(self.best_stock_alloc))
def optimize(self, learning_rate, steps):
"""create the op for the optimizer using specified learning_rate, run for specified steps"""
self.learning_rate = learning_rate
self.steps = steps
self.steps_ago = 0 # how many steps since objective improved
print("%s Objective: %f" % (strftime("%H:%M:%S"), self.best_objective))
print("%s Constant spending: %.12f" % (strftime("%H:%M:%S"), self.best_const_spend))
print("%s Variable spending by year" % strftime("%H:%M:%S"))
print(self.best_var_spend)
print("%s Stock allocation by year" % strftime("%H:%M:%S"))
print(self.best_stock_alloc)
with self.model.graph.as_default():
with tf.device("/cpu:0"):
# minimize op
print('%s Create optimizer (learning rate %.12f)' % (strftime("%H:%M:%S"),
self.learning_rate))
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.grads = self.optimizer.compute_gradients(self.cost_op)
self.optimize_step = self.optimizer.apply_gradients(self.grads)
# following line is equivalent to previous 2 lines
# self.optimize_step = self.optimizer.minimize(self.cost_op)
print('%s Create optimizer op and run %d steps' % (strftime("%H:%M:%S"), self.steps))
for i in range(self.steps):
self.run_step()
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='safewithdrawal.py',
description='run optimization with specified learning rate, max unimproved steps',
epilog="""example:
./safewithdrawal.py 0.000001 100 2.0 opt02
"""
)
parser.add_argument('learning_rate', type=float)
parser.add_argument('steps', type=float)
parser.add_argument('gamma', type=float)
parser.add_argument('fileprefix')
args = parser.parse_args()
picklefile = "%s.pickle" % args.fileprefix
csvfile = "summary_%s.csv" % args.fileprefix
yearsfile = "years_%s.csv" % args.fileprefix
returnsfile = "retyears_%s.csv" % args.fileprefix
print('%s Start optimization session' % strftime("%H:%M:%S"))
print('%s learning_rate: %.12f steps %d picklefile %s' % (strftime("%H:%M:%S"),
args.learning_rate,
args.steps,
picklefile))
print("opening picklefile %s" % picklefile)
const_spend_pct, var_spend_pcts, stock_allocations, bond_allocations = pickle.load( open(picklefile, "rb" ) )
print ("const spend: %f" % const_spend_pct)
print ("variable spend:")
print (var_spend_pcts)
print ("stock allocation:" )
print (stock_allocations)
model = SafeWithdrawalModel(returns_list = [real_stocks, real_bonds],
names_list = ["stocks","bonds"],
allocations_list = [stock_allocations, bond_allocations],
start_val = 100.0,
const_spend_pct = const_spend_pct,
var_spend_pcts = var_spend_pcts,
gamma = args.gamma,
survival=None
)
# generate cohorts
model.cohort_history = CohortHistory(model)
model.dump_json()
print('%s Summary by cohort' % strftime("%H:%M:%S"))
print(model.cohort_history.as_dataframe())
summary = model.cohort_history.summarize_by_year()
print(summary)
# run optimizer
# set an initial learning rate that improves objective by a reasonable amount each step
learning_rate = args.learning_rate
model.optimizer = CohortHistoryOptimize(model)
model.optimizer.optimize(learning_rate, steps=1)
# continue optimizing without re-initializing vars or optimizer
# reduce learning rate if no improvement for a while
# end when learning rate is too small to make significant improvement
max_steps = 1000001 # add 1 to get one last iteration to print
max_steps_unimproved = args.steps
report_steps = 50
learning_rate = model.optimizer.learning_rate
for i in range(max_steps):
model.optimizer.run_step(report_steps=report_steps)
if model.optimizer.steps_ago >= max_steps_unimproved: # no improvement for too long
break
const_spend = model.optimizer.best_const_spend
var_spend_pcts = pd.Series(model.optimizer.best_var_spend)
stock_allocations = pd.Series(model.optimizer.best_stock_alloc)
bond_allocations = 1 - stock_allocations
pickle_list = [const_spend, var_spend_pcts, stock_allocations, bond_allocations]
pickle.dump( pickle_list, open( picklefile, "wb" ) )
model.dump_json()
all_years = model.cohort_history.spend_by_year()
all_years.to_csv(yearsfile, float_format="%.18f")
ret_years = model.cohort_history.returns_by_year()
ret_years.to_csv(returnsfile, float_format="%.18f")
summary = model.cohort_history.summarize_by_year()
summary.to_csv(csvfile, float_format="%.18f")
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# * supported arch for versions: for old versions of batch file without
# argument, giving bogus argument cannot be detected, so we have to hardcode
# this here
# * print warning when msvc version specified but not found
# * find out why warnings do not print
# * test on 64 bits XP + VS 2005 (and VS 6 if possible)
# * SDK
# * Assembly
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """Module for Visual C/C++ detection and configuration.
"""
import SCons.compat
import os
import platform
from string import digits as string_digits
import SCons.Warnings
import common
debug = common.debug
import sdk
get_installed_sdks = sdk.get_installed_sdks
class VisualCException(Exception):
pass
class UnsupportedVersion(VisualCException):
pass
class UnsupportedArch(VisualCException):
pass
class MissingConfiguration(VisualCException):
pass
class NoVersionFound(VisualCException):
pass
class BatchFileExecutionError(VisualCException):
pass
# Dict to canonicalize the arch
_ARCH_TO_CANONICAL = {
"amd64" : "amd64",
"emt64" : "amd64",
"i386" : "x86",
"i486" : "x86",
"i586" : "x86",
"i686" : "x86",
"ia64" : "ia64",
"itanium" : "ia64",
"x86" : "x86",
"x86_64" : "amd64",
}
# Given a (host, target) tuple, return the argument for the bat file. Both host
# and target should be canonicalized.
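# e.g. building on a 64-bit host for a 64-bit target passes "amd64" to vcvarsall.bat,
# while a 32-bit host targeting 64-bit uses the "x86_amd64" cross-compiler argument.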
_HOST_TARGET_ARCH_TO_BAT_ARCH = {
("x86", "x86"): "x86",
("x86", "amd64"): "x86_amd64",
("amd64", "amd64"): "amd64",
("amd64", "x86"): "x86",
("x86", "ia64"): "x86_ia64"
}
def get_host_target(env):
debug('vc.py:get_host_target()')
host_platform = env.get('HOST_ARCH')
if not host_platform:
host_platform = platform.machine()
# TODO(2.5): the native Python platform.machine() function returns
# '' on all Python versions before 2.6, after which it also uses
# PROCESSOR_ARCHITECTURE.
if not host_platform:
host_platform = os.environ.get('PROCESSOR_ARCHITECTURE', '')
# Retain user requested TARGET_ARCH
req_target_platform = env.get('TARGET_ARCH')
debug('vc.py:get_host_target() req_target_platform:%s'%req_target_platform)
if req_target_platform:
# If user requested a specific platform then only try that one.
target_platform = req_target_platform
else:
target_platform = host_platform
try:
host = _ARCH_TO_CANONICAL[host_platform.lower()]
except KeyError, e:
msg = "Unrecognized host architecture %s"
raise ValueError(msg % repr(host_platform))
try:
target = _ARCH_TO_CANONICAL[target_platform.lower()]
except KeyError, e:
raise ValueError("Unrecognized target architecture %s" % target_platform)
return (host, target,req_target_platform)
_VCVER = ["10.0Exp","10.0", "9.0", "9.0Exp","8.0", "8.0Exp","7.1", "7.0", "6.0"]
_VCVER_TO_PRODUCT_DIR = {
'10.0Exp' : [
r'Microsoft\VCExpress\10.0\Setup\VC\ProductDir'],
'10.0': [
r'Microsoft\VisualStudio\10.0\Setup\VC\ProductDir'],
'9.0': [
r'Microsoft\VisualStudio\9.0\Setup\VC\ProductDir'],
'9.0Exp' : [
r'Microsoft\VCExpress\9.0\Setup\VC\ProductDir'],
'8.0': [
r'Microsoft\VisualStudio\8.0\Setup\VC\ProductDir'],
'8.0Exp': [
r'Microsoft\VCExpress\8.0\Setup\VC\ProductDir'],
'7.1': [
r'Microsoft\VisualStudio\7.1\Setup\VC\ProductDir'],
'7.0': [
r'Microsoft\VisualStudio\7.0\Setup\VC\ProductDir'],
'6.0': [
r'Microsoft\VisualStudio\6.0\Setup\Microsoft Visual C++\ProductDir']
}
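# msvc_version_to_maj_min strips any non-numeric suffix before splitting, so e.g.
# msvc_version_to_maj_min("9.0Exp") returns (9, 0).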
def msvc_version_to_maj_min(msvc_version):
msvc_version_numeric = ''.join([x for x in msvc_version if x in string_digits + '.'])
t = msvc_version_numeric.split(".")
if not len(t) == 2:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
try:
maj = int(t[0])
min = int(t[1])
return maj, min
except ValueError, e:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
def is_host_target_supported(host_target, msvc_version):
"""Return True if the given (host, target) tuple is supported given the
msvc version.
Parameters
----------
host_target: tuple
tuple of (canonicalized) host, target, e.g. ("x86", "amd64") for cross
compilation from 32-bit Windows to 64-bit.
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Note
----
This only checks whether a given version *may* support the given (host,
target) pair, not that the toolchain is actually present on the machine.
"""
# We assume that any Visual Studio version supports x86 as a target
if host_target[1] != "x86":
maj, min = msvc_version_to_maj_min(msvc_version)
if maj < 8:
return False
return True
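# find_vc_pdir resolves the registry value for the requested version, e.g. for "9.0" it
# reads Software\[Wow6432Node\]Microsoft\VisualStudio\9.0\Setup\VC\ProductDir (per the
# table above) via common.read_reg and returns that directory if it exists on disk.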
def find_vc_pdir(msvc_version):
"""Try to find the product directory for the given
version.
Note
----
If for some reason the requested version could not be found, an
exception which inherits from VisualCException will be raised."""
root = 'Software\\'
if common.is_win64():
root = root + 'Wow6432Node\\'
try:
hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version]
except KeyError:
debug("Unknown version of MSVC: %s" % msvc_version)
raise UnsupportedVersion("Unknown version %s" % msvc_version)
for key in hkeys:
key = root + key
try:
comps = common.read_reg(key)
except WindowsError, e:
debug('find_vc_dir(): no VC registry key %s' % repr(key))
else:
debug('find_vc_dir(): found VC in registry: %s' % comps)
if os.path.exists(comps):
return comps
else:
debug('find_vc_dir(): reg says dir is %s, but it does not exist. (ignoring)'\
% comps)
raise MissingConfiguration("registry dir %s not found on the filesystem" % comps)
return None
def find_batch_file(env,msvc_version,host_arch,target_arch):
"""
Find the location of the batch script which should set up the compiler
for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress
"""
pdir = find_vc_pdir(msvc_version)
if pdir is None:
raise NoVersionFound("No version of Visual Studio found")
debug('vc.py: find_batch_file() pdir:%s'%pdir)
# filter out e.g. "Exp" from the version name
msvc_ver_numeric = ''.join([x for x in msvc_version if x in string_digits + "."])
vernum = float(msvc_ver_numeric)
if 7 <= vernum < 8:
pdir = os.path.join(pdir, os.pardir, "Common7", "Tools")
batfilename = os.path.join(pdir, "vsvars32.bat")
elif vernum < 7:
pdir = os.path.join(pdir, "Bin")
batfilename = os.path.join(pdir, "vcvars32.bat")
else: # >= 8
batfilename = os.path.join(pdir, "vcvarsall.bat")
if not os.path.exists(batfilename):
debug("Not found: %s" % batfilename)
batfilename = None
installed_sdks=get_installed_sdks()
for _sdk in installed_sdks:
sdk_bat_file=_sdk.get_sdk_vc_script(host_arch,target_arch)
sdk_bat_file_path=os.path.join(pdir,sdk_bat_file)
debug('vc.py:find_batch_file() sdk_bat_file_path:%s'%sdk_bat_file_path)
if os.path.exists(sdk_bat_file_path):
return (batfilename,sdk_bat_file_path)
else:
debug("vc.py:find_batch_file() not found:%s"%sdk_bat_file_path)
else:
return (batfilename,None)
__INSTALLED_VCS_RUN = None
def cached_get_installed_vcs():
global __INSTALLED_VCS_RUN
if __INSTALLED_VCS_RUN is None:
ret = get_installed_vcs()
__INSTALLED_VCS_RUN = ret
return __INSTALLED_VCS_RUN
def get_installed_vcs():
installed_versions = []
for ver in _VCVER:
debug('trying to find VC %s' % ver)
try:
if find_vc_pdir(ver):
debug('found VC %s' % ver)
installed_versions.append(ver)
else:
debug('find_vc_pdir return None for ver %s' % ver)
except VisualCException, e:
debug('did not find VC %s: caught exception %s' % (ver, str(e)))
return installed_versions
def reset_installed_vcs():
"""Make it try again to find VC. This is just for the tests."""
global __INSTALLED_VCS_RUN
__INSTALLED_VCS_RUN = None
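# script_env runs the given batch file and captures the resulting environment;
# common.parse_output is expected to return a dict of the interesting variables
# (INCLUDE, LIB, LIBPATH, PATH and the like) mapped to lists of directories, which
# msvc_setup_env below merges into the construction environment.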
def script_env(script, args=None):
stdout = common.get_output(script, args)
# Stupid batch files do not set return code: we take a look at the
# beginning of the output for an error message instead
olines = stdout.splitlines()
if olines[0].startswith("The specified configuration type is missing"):
raise BatchFileExecutionError("\n".join(olines[:2]))
return common.parse_output(stdout)
def get_default_version(env):
debug('get_default_version()')
msvc_version = env.get('MSVC_VERSION')
msvs_version = env.get('MSVS_VERSION')
debug('get_default_version(): msvc_version:%s msvs_version:%s'%(msvc_version,msvs_version))
if msvs_version and not msvc_version:
SCons.Warnings.warn(
SCons.Warnings.DeprecatedWarning,
"MSVS_VERSION is deprecated: please use MSVC_VERSION instead ")
return msvs_version
elif msvc_version and msvs_version:
if not msvc_version == msvs_version:
SCons.Warnings.warn(
SCons.Warnings.VisualVersionMismatch,
"Requested msvc version (%s) and msvs version (%s) do " \
"not match: please use MSVC_VERSION only to request a " \
"visual studio version, MSVS_VERSION is deprecated" \
% (msvc_version, msvs_version))
return msvs_version
if not msvc_version:
installed_vcs = cached_get_installed_vcs()
debug('installed_vcs:%s' % installed_vcs)
if not installed_vcs:
#msg = 'No installed VCs'
#debug('msv %s\n' % repr(msg))
#SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, msg)
debug('msvc_setup_env: No installed VCs')
return None
msvc_version = installed_vcs[0]
debug('msvc_setup_env: using default installed MSVC version %s\n' % repr(msvc_version))
return msvc_version
def msvc_setup_env_once(env):
try:
has_run = env["MSVC_SETUP_RUN"]
except KeyError:
has_run = False
if not has_run:
msvc_setup_env(env)
env["MSVC_SETUP_RUN"] = True
def msvc_find_valid_batch_script(env,version):
debug('vc.py:msvc_find_valid_batch_script()')
# Find the host platform, target platform, and if present the requested
# target platform
(host_platform, target_platform,req_target_platform) = get_host_target(env)
# If the user hasn't specifically requested a TARGET_ARCH and the target
# platform is amd64, also try x86 in case no viable 64-bit tools
# are installed
try_target_archs = [target_platform]
if not req_target_platform and target_platform=='amd64':
try_target_archs.append('x86')
d = None
for tp in try_target_archs:
# Set to current arch.
env['TARGET_ARCH']=tp
debug("vc.py:msvc_find_valid_batch_script() trying target_platform:%s"%tp)
host_target = (host_platform, tp)
if not is_host_target_supported(host_target, version):
warn_msg = "host, target = %s not supported for MSVC version %s" % \
(host_target, version)
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
arg = _HOST_TARGET_ARCH_TO_BAT_ARCH[host_target]
# Try to locate a batch file for this host/target platform combo
try:
(vc_script,sdk_script) = find_batch_file(env,version,host_platform,tp)
debug('vc.py:msvc_find_valid_batch_script() vc_script:%s sdk_script:%s'%(vc_script,sdk_script))
except VisualCException, e:
msg = str(e)
debug('Caught exception while looking for batch file (%s)' % msg)
warn_msg = "VC version %s not installed. " + \
"C/C++ compilers are most likely not set correctly.\n" + \
" Installed versions are: %s"
warn_msg = warn_msg % (version, cached_get_installed_vcs())
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
continue
# Try to use the located batch file for this host/target platform combo
debug('vc.py:msvc_find_valid_batch_script() use_script 2 %s, args:%s\n' % (repr(vc_script), arg))
if vc_script:
try:
d = script_env(vc_script, args=arg)
except BatchFileExecutionError, e:
debug('vc.py:msvc_find_valid_batch_script() use_script 3: failed running VC script %s: %s: Error:%s'%(repr(vc_script),arg,e))
vc_script=None
if not vc_script and sdk_script:
debug('vc.py:msvc_find_valid_batch_script() use_script 4: trying sdk script: %s'%(sdk_script))
try:
d = script_env(sdk_script,args=[])
except BatchFileExecutionError,e:
debug('vc.py:msvc_find_valid_batch_script() use_script 5: failed running SDK script %s: Error:%s'%(repr(sdk_script),e))
continue
elif not vc_script and not sdk_script:
debug('vc.py:msvc_find_valid_batch_script() use_script 6: Neither VC script nor SDK script found')
continue
# If we cannot find a viable installed compiler, reset the TARGET_ARCH
# to its initial value
if not d:
env['TARGET_ARCH']=req_target_platform
return d
def msvc_setup_env(env):
debug('msvc_setup_env()')
version = get_default_version(env)
if version is None:
warn_msg = "No version of Visual Studio compiler found - C/C++ " \
"compilers most likely not set correctly"
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
return None
debug('msvc_setup_env: using specified MSVC version %s\n' % repr(version))
# XXX: we set up both MSVC_VERSION and MSVS_VERSION for backward
# compatibility with the msvs tool
env['MSVC_VERSION'] = version
env['MSVS_VERSION'] = version
env['MSVS'] = {}
use_script = env.get('MSVC_USE_SCRIPT', True)
if SCons.Util.is_String(use_script):
debug('vc.py:msvc_setup_env() use_script 1 %s\n' % repr(use_script))
d = script_env(use_script)
elif use_script:
d = msvc_find_valid_batch_script(env,version)
debug('vc.py:msvc_setup_env() use_script 2 %s\n' % d)
if not d:
return d
else:
debug('MSVC_USE_SCRIPT set to False')
warn_msg = "MSVC_USE_SCRIPT set to False, assuming environment " \
"set correctly."
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
return None
for k, v in d.items():
debug('vc.py:msvc_setup_env() env:%s -> %s'%(k,v))
env.PrependENVPath(k, v, delete_existing=True)
def msvc_exists(version=None):
vcs = cached_get_installed_vcs()
if version is None:
return len(vcs) > 0
return version in vcs
import math
from pandac.PandaModules import Vec3, deg2Rad, Point3, NodePath, VBase4, CollisionHandlerEvent, CollisionNode, CollisionSphere
from direct.fsm import ClassicFSM, State
from direct.distributed.ClockDelta import globalClockDelta
from direct.gui.DirectGui import DirectLabel
from direct.interval.IntervalGlobal import Sequence, LerpScaleInterval, LerpFunctionInterval, Func, Parallel, LerpPosInterval, Wait, SoundInterval, LerpColorScaleInterval
from toontown.toonbase import ToontownGlobals, TTLocalizer, ToontownTimer
from toontown.minigame import ArrowKeys
from toontown.minigame import DistributedMinigame
from toontown.minigame import DistributedIceWorld
from toontown.minigame import IceGameGlobals
from toontown.minigame import MinigameAvatarScorePanel
from toontown.minigame import IceTreasure
class DistributedIceGame(DistributedMinigame.DistributedMinigame, DistributedIceWorld.DistributedIceWorld):
notify = directNotify.newCategory('DistributedIceGame')
MaxLocalForce = 100
MaxPhysicsForce = 25000
def __init__(self, cr):
DistributedMinigame.DistributedMinigame.__init__(self, cr)
DistributedIceWorld.DistributedIceWorld.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedIceGame', [State.State('off', self.enterOff, self.exitOff, ['inputChoice']),
State.State('inputChoice', self.enterInputChoice, self.exitInputChoice, ['waitServerChoices',
'moveTires',
'displayVotes',
'cleanup']),
State.State('waitServerChoices', self.enterWaitServerChoices, self.exitWaitServerChoices, ['moveTires', 'cleanup']),
State.State('moveTires', self.enterMoveTires, self.exitMoveTires, ['synch', 'cleanup']),
State.State('synch', self.enterSynch, self.exitSynch, ['inputChoice', 'scoring', 'cleanup']),
State.State('scoring', self.enterScoring, self.exitScoring, ['cleanup', 'finalResults', 'inputChoice']),
State.State('finalResults', self.enterFinalResults, self.exitFinalResults, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.cameraThreeQuarterView = (0, -22, 45, 0, -62.89, 0)
self.tireDict = {}
self.forceArrowDict = {}
self.canDrive = False
self.timer = None
self.timerStartTime = None
self.curForce = 0
self.curHeading = 0
self.headingMomentum = 0.0
self.forceMomentum = 0.0
self.allTireInputs = None
self.curRound = 0
self.curMatch = 0
self.controlKeyWarningLabel = DirectLabel(text=TTLocalizer.IceGameControlKeyWarning, text_fg=VBase4(1, 0, 0, 1), relief=None, pos=(0.0, 0, 0), scale=0.15)
self.controlKeyWarningLabel.hide()
self.waitingMoveLabel = DirectLabel(text=TTLocalizer.IceGameWaitingForPlayersToFinishMove, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(-0.6, 0, -0.75), scale=0.075)
self.waitingMoveLabel.hide()
self.waitingSyncLabel = DirectLabel(text=TTLocalizer.IceGameWaitingForAISync, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(-0.6, 0, -0.75), scale=0.075)
self.waitingSyncLabel.hide()
self.infoLabel = DirectLabel(text='', text_fg=VBase4(0, 0, 0, 1), relief=None, pos=(0.0, 0, 0.7), scale=0.075)
self.updateInfoLabel()
self.lastForceArrowUpdateTime = 0
self.sendForceArrowUpdateAsap = False
self.treasures = []
self.penalties = []
self.obstacles = []
self.controlKeyPressed = False
self.controlKeyWarningIval = None
return
def delete(self):
DistributedIceWorld.DistributedIceWorld.delete(self)
DistributedMinigame.DistributedMinigame.delete(self)
if self.controlKeyWarningIval:
self.controlKeyWarningIval.finish()
self.controlKeyWarningIval = None
self.controlKeyWarningLabel.destroy()
del self.controlKeyWarningLabel
self.waitingMoveLabel.destroy()
del self.waitingMoveLabel
self.waitingSyncLabel.destroy()
del self.waitingSyncLabel
self.infoLabel.destroy()
del self.infoLabel
for treasure in self.treasures:
treasure.destroy()
del self.treasures
for penalty in self.penalties:
penalty.destroy()
del self.penalties
for obstacle in self.obstacles:
obstacle.removeNode()
del self.obstacles
del self.gameFSM
return
def announceGenerate(self):
DistributedMinigame.DistributedMinigame.announceGenerate(self)
DistributedIceWorld.DistributedIceWorld.announceGenerate(self)
self.debugTaskName = self.uniqueName('debugTask')
def getTitle(self):
return TTLocalizer.IceGameTitle
def getInstructions(self):
szId = self.getSafezoneId()
numPenalties = IceGameGlobals.NumPenalties[szId]
result = TTLocalizer.IceGameInstructions
if numPenalties == 0:
result = TTLocalizer.IceGameInstructionsNoTnt
return result
def getMaxDuration(self):
return 0
def load(self):
self.notify.debug('load')
DistributedMinigame.DistributedMinigame.load(self)
self.music = base.loadMusic('phase_4/audio/bgm/MG_IceGame.mid')
self.gameBoard = loader.loadModel('phase_4/models/minigames/ice_game_icerink')
background = loader.loadModel('phase_4/models/minigames/ice_game_2d')
background.reparentTo(self.gameBoard)
self.gameBoard.setPosHpr(0, 0, 0, 0, 0, 0)
self.gameBoard.setScale(1.0)
self.setupSimulation()
index = 0
for avId in self.avIdList:
self.setupTire(avId, index)
self.setupForceArrow(avId)
index += 1
for index in xrange(len(self.avIdList), 4):
self.setupTire(-index, index)
self.setupForceArrow(-index)
self.showForceArrows(realPlayersOnly=True)
self.westWallModel = NodePath()
if not self.westWallModel.isEmpty():
self.westWallModel.reparentTo(self.gameBoard)
self.westWallModel.setPos(IceGameGlobals.MinWall[0], IceGameGlobals.MinWall[1], 0)
self.westWallModel.setScale(4)
self.eastWallModel = NodePath()
if not self.eastWallModel.isEmpty():
self.eastWallModel.reparentTo(self.gameBoard)
self.eastWallModel.setPos(IceGameGlobals.MaxWall[0], IceGameGlobals.MaxWall[1], 0)
self.eastWallModel.setScale(4)
self.eastWallModel.setH(180)
self.arrowKeys = ArrowKeys.ArrowKeys()
self.target = loader.loadModel('phase_3/models/misc/sphere')
self.target.setScale(0.01)
self.target.reparentTo(self.gameBoard)
self.target.setPos(0, 0, 0)
self.scoreCircle = loader.loadModel('phase_4/models/minigames/ice_game_score_circle')
self.scoreCircle.setScale(0.01)
self.scoreCircle.reparentTo(self.gameBoard)
self.scoreCircle.setZ(IceGameGlobals.TireRadius / 2.0)
self.scoreCircle.setAlphaScale(0.5)
self.scoreCircle.setTransparency(1)
self.scoreCircle.hide()
self.treasureModel = loader.loadModel('phase_4/models/minigames/ice_game_barrel')
self.penaltyModel = loader.loadModel('phase_4/models/minigames/ice_game_tnt2')
self.penaltyModel.setScale(0.75, 0.75, 0.7)
szId = self.getSafezoneId()
obstacles = IceGameGlobals.Obstacles[szId]
index = 0
cubicObstacle = IceGameGlobals.ObstacleShapes[szId]
for pos in obstacles:
newPos = Point3(pos[0], pos[1], IceGameGlobals.TireRadius)
newObstacle = self.createObstacle(newPos, index, cubicObstacle)
self.obstacles.append(newObstacle)
index += 1
self.countSound = loader.loadSfx('phase_3.5/audio/sfx/tick_counter.mp3')
self.treasureGrabSound = loader.loadSfx('phase_4/audio/sfx/MG_sfx_vine_game_bananas.mp3')
self.penaltyGrabSound = loader.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.mp3')
self.tireSounds = []
for tireIndex in xrange(4):
tireHit = loader.loadSfx('phase_4/audio/sfx/Golf_Hit_Barrier_1.mp3')
wallHit = loader.loadSfx('phase_4/audio/sfx/MG_maze_pickup.mp3')
obstacleHit = loader.loadSfx('phase_4/audio/sfx/Golf_Hit_Barrier_2.mp3')
self.tireSounds.append({'tireHit': tireHit,
'wallHit': wallHit,
'obstacleHit': obstacleHit})
self.arrowRotateSound = loader.loadSfx('phase_4/audio/sfx/MG_sfx_ice_force_rotate.wav')
self.arrowUpSound = loader.loadSfx('phase_4/audio/sfx/MG_sfx_ice_force_increase_3sec.mp3')
self.arrowDownSound = loader.loadSfx('phase_4/audio/sfx/MG_sfx_ice_force_decrease_3sec.mp3')
self.scoreCircleSound = loader.loadSfx('phase_4/audio/sfx/MG_sfx_ice_scoring_1.mp3')
def unload(self):
self.notify.debug('unload')
DistributedMinigame.DistributedMinigame.unload(self)
del self.music
self.gameBoard.removeNode()
del self.gameBoard
for forceArrow in self.forceArrowDict.values():
forceArrow.removeNode()
del self.forceArrowDict
self.scoreCircle.removeNode()
del self.scoreCircle
del self.countSound
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.DistributedMinigame.onstage(self)
self.gameBoard.reparentTo(render)
self.__placeToon(self.localAvId)
self.moveCameraToTop()
self.scorePanels = []
base.playMusic(self.music, looping=1, volume=0.8)
def offstage(self):
self.notify.debug('offstage')
self.music.stop()
self.gameBoard.hide()
self.infoLabel.hide()
for avId in self.tireDict:
self.tireDict[avId]['tireNodePath'].hide()
for panel in self.scorePanels:
panel.cleanup()
del self.scorePanels
for obstacle in self.obstacles:
obstacle.hide()
for treasure in self.treasures:
treasure.nodePath.hide()
for penalty in self.penalties:
penalty.nodePath.hide()
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.dropShadow.show()
av.resetLOD()
taskMgr.remove(self.uniqueName('aimtask'))
self.arrowKeys.destroy()
del self.arrowKeys
DistributedMinigame.DistributedMinigame.offstage(self)
def handleDisabledAvatar(self, avId):
self.notify.debug('handleDisabledAvatar')
self.notify.debug('avatar ' + str(avId) + ' disabled')
DistributedMinigame.DistributedMinigame.handleDisabledAvatar(self, avId)
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.DistributedMinigame.setGameReady(self):
return
for index in xrange(self.numPlayers):
avId = self.avIdList[index]
toon = self.getAvatar(avId)
if toon:
toon.reparentTo(render)
self.__placeToon(avId)
toon.forwardSpeed = 0
toon.rotateSpeed = False
toon.dropShadow.hide()
toon.setAnimState('Sit')
if avId in self.tireDict:
tireNp = self.tireDict[avId]['tireNodePath']
toon.reparentTo(tireNp)
toon.setY(1.0)
toon.setZ(-3)
toon.startLookAround()
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.DistributedMinigame.setGameStart(self, timestamp)
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
toon.stopLookAround()
self.scores = [0] * self.numPlayers
spacing = 0.4
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avName = self.getAvatarName(avId)
scorePanel = MinigameAvatarScorePanel.MinigameAvatarScorePanel(avId, avName)
scorePanel.setScale(0.9)
scorePanel.setPos(0.75 - spacing * (self.numPlayers - 1 - i), 0.0, 0.875)
scorePanel.makeTransparent(0.75)
self.scorePanels.append(scorePanel)
self.arrowKeys.setPressHandlers([self.__upArrowPressed,
self.__downArrowPressed,
self.__leftArrowPressed,
self.__rightArrowPressed,
self.__controlPressed])
def isInPlayState(self):
if not self.gameFSM.getCurrentState():
return False
if not self.gameFSM.getCurrentState().getName() == 'play':
return False
return True
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterInputChoice(self):
self.notify.debug('enterInputChoice')
self.forceLocalToonToTire()
self.controlKeyPressed = False
if self.curRound == 0:
self.setupStartOfMatch()
else:
self.notify.debug('self.curRound = %s' % self.curRound)
self.timer = ToontownTimer.ToontownTimer()
self.timer.hide()
if self.timerStartTime != None:
self.startTimer()
self.showForceArrows(realPlayersOnly=True)
self.localForceArrow().setPosHpr(0, 0, -1.0, 0, 0, 0)
self.localForceArrow().reparentTo(self.localTireNp())
self.localForceArrow().setY(IceGameGlobals.TireRadius)
self.localTireNp().headsUp(self.target)
self.notify.debug('self.localForceArrow() heading = %s' % self.localForceArrow().getH())
self.curHeading = self.localTireNp().getH()
self.curForce = 25
self.updateLocalForceArrow()
for avId in self.forceArrowDict:
forceArrow = self.forceArrowDict[avId]
forceArrow.setPosHpr(0, 0, -1.0, 0, 0, 0)
tireNp = self.tireDict[avId]['tireNodePath']
forceArrow.reparentTo(tireNp)
forceArrow.setY(IceGameGlobals.TireRadius)
tireNp.headsUp(self.target)
self.updateForceArrow(avId, tireNp.getH(), 25)
taskMgr.add(self.__aimTask, self.uniqueName('aimtask'))
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.stop()
self.sendForceArrowUpdateAsap = False
return
def exitInputChoice(self):
if not self.controlKeyPressed:
if self.controlKeyWarningIval:
self.controlKeyWarningIval.finish()
self.controlKeyWarningIval = None
self.controlKeyWarningIval = Sequence(Func(self.controlKeyWarningLabel.show), self.controlKeyWarningLabel.colorScaleInterval(10, VBase4(1, 1, 1, 0), startColorScale=VBase4(1, 1, 1, 1)), Func(self.controlKeyWarningLabel.hide))
self.controlKeyWarningIval.start()
if self.timer != None:
self.timer.destroy()
self.timer = None
self.timerStartTime = None
self.hideForceArrows()
self.arrowRotateSound.stop()
self.arrowUpSound.stop()
self.arrowDownSound.stop()
taskMgr.remove(self.uniqueName('aimtask'))
return
def enterWaitServerChoices(self):
self.waitingMoveLabel.show()
self.showForceArrows(True)
def exitWaitServerChoices(self):
self.waitingMoveLabel.hide()
self.hideForceArrows()
def enterMoveTires(self):
for key in self.tireDict:
body = self.tireDict[key]['tireBody']
body.setAngularVel(0, 0, 0)
body.setLinearVel(0, 0, 0)
for index in xrange(len(self.allTireInputs)):
input = self.allTireInputs[index]
avId = self.avIdList[index]
body = self.getTireBody(avId)
degs = input[1] + 90
tireNp = self.getTireNp(avId)
tireH = tireNp.getH()
self.notify.debug('tireH = %s' % tireH)
radAngle = deg2Rad(degs)
foo = NodePath('foo')
dirVector = Vec3(math.cos(radAngle), math.sin(radAngle), 0)
self.notify.debug('dirVector is now=%s' % dirVector)
inputForce = input[0]
inputForce /= self.MaxLocalForce
inputForce *= self.MaxPhysicsForce
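# input[1] is the chosen heading in degrees; the +90 above shifts it from Panda's
# heading convention (H=0 faces +Y) to the math-angle convention used by cos/sin (0 = +X).
# input[0] is the local force (0..MaxLocalForce), rescaled here to physics units,
# e.g. a local force of 50 becomes 50/100 * 25000 = 12500.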
force = dirVector * inputForce
self.notify.debug('adding force %s to %d' % (force, avId))
body.addForce(force)
self.enableAllTireBodies()
self.totalPhysicsSteps = 0
self.startSim()
taskMgr.add(self.__moveTiresTask, self.uniqueName('moveTiresTtask'))
def exitMoveTires(self):
self.forceLocalToonToTire()
self.disableAllTireBodies()
self.stopSim()
self.notify.debug('total Physics steps = %d' % self.totalPhysicsSteps)
taskMgr.remove(self.uniqueName('moveTiresTtask'))
def enterSynch(self):
self.waitingSyncLabel.show()
def exitSynch(self):
self.waitingSyncLabel.hide()
def enterScoring(self):
sortedByDistance = []
for avId in self.avIdList:
np = self.getTireNp(avId)
pos = np.getPos()
pos.setZ(0)
sortedByDistance.append((avId, pos.length()))
def compareDistance(x, y):
if x[1] - y[1] > 0:
return 1
elif x[1] - y[1] < 0:
return -1
else:
return 0
sortedByDistance.sort(cmp=compareDistance)
self.scoreMovie = Sequence()
curScale = 0.01
curTime = 0
self.scoreCircle.setScale(0.01)
self.scoreCircle.show()
self.notify.debug('newScores = %s' % self.newScores)
circleStartTime = 0
for index in xrange(len(sortedByDistance)):
distance = sortedByDistance[index][1]
avId = sortedByDistance[index][0]
scorePanelIndex = self.avIdList.index(avId)
time = (distance - curScale) / IceGameGlobals.ExpandFeetPerSec
if time < 0:
time = 0.01
scaleXY = distance + IceGameGlobals.TireRadius
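# the scoring ring grows outward at ExpandFeetPerSec, pausing at each tire in distance
# order; TireRadius is added so the expanded ring visually reaches the tire's outer edge.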
self.notify.debug('circleStartTime = %s' % circleStartTime)
self.scoreMovie.append(Parallel(LerpScaleInterval(self.scoreCircle, time, Point3(scaleXY, scaleXY, 1.0)), SoundInterval(self.scoreCircleSound, duration=time, startTime=circleStartTime)))
circleStartTime += time
startScore = self.scorePanels[scorePanelIndex].getScore()
destScore = self.newScores[scorePanelIndex]
self.notify.debug('for avId %d, startScore=%d, newScores=%d' % (avId, startScore, destScore))
def increaseScores(t, scorePanelIndex = scorePanelIndex, startScore = startScore, destScore = destScore):
oldScore = self.scorePanels[scorePanelIndex].getScore()
diff = destScore - startScore
newScore = int(startScore + diff * t)
if newScore > oldScore:
base.playSfx(self.countSound)
self.scorePanels[scorePanelIndex].setScore(newScore)
self.scores[scorePanelIndex] = newScore
duration = (destScore - startScore) * IceGameGlobals.ScoreCountUpRate
tireNp = self.tireDict[avId]['tireNodePath']
self.scoreMovie.append(Parallel(LerpFunctionInterval(increaseScores, duration), Sequence(LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 0, 0, 1)), LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 1, 1, 1)), LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 0, 0, 1)), LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 1, 1, 1)), LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 0, 0, 1)), LerpColorScaleInterval(tireNp, duration / 6.0, VBase4(1, 1, 1, 1)))))
curScale += distance
self.scoreMovie.append(Func(self.sendUpdate, 'reportScoringMovieDone', []))
self.scoreMovie.start()
def exitScoring(self):
self.scoreMovie.finish()
self.scoreMovie = None
self.scoreCircle.hide()
return
def enterFinalResults(self):
lerpTrack = Parallel()
lerpDur = 0.5
tY = 0.6
bY = -.05
lX = -.5
cX = 0
rX = 0.5
scorePanelLocs = (((cX, bY),),
((lX, bY), (rX, bY)),
((cX, tY), (lX, bY), (rX, bY)),
((lX, tY),
(rX, tY),
(lX, bY),
(rX, bY)))
scorePanelLocs = scorePanelLocs[self.numPlayers - 1]
for i in xrange(self.numPlayers):
panel = self.scorePanels[i]
pos = scorePanelLocs[i]
lerpTrack.append(Parallel(LerpPosInterval(panel, lerpDur, Point3(pos[0], 0, pos[1]), blendType='easeInOut'), LerpScaleInterval(panel, lerpDur, Vec3(panel.getScale()) * 2.0, blendType='easeInOut')))
self.showScoreTrack = Parallel(lerpTrack, Sequence(Wait(IceGameGlobals.ShowScoresDuration), Func(self.gameOver)))
self.showScoreTrack.start()
def exitFinalResults(self):
self.showScoreTrack.pause()
del self.showScoreTrack
def enterCleanup(self):
self.notify.debug('enterCleanup')
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.start()
def exitCleanup(self):
pass
def __placeToon(self, avId):
toon = self.getAvatar(avId)
if toon:
toon.setPos(0, 0, 0)
toon.setHpr(0, 0, 0)
def moveCameraToTop(self):
camera.reparentTo(render)
p = self.cameraThreeQuarterView
camera.setPosHpr(p[0], p[1], p[2], p[3], p[4], p[5])
def setupTire(self, avId, index):
tireNp, tireBody, tireOdeGeom = self.createTire(index)
self.tireDict[avId] = {'tireNodePath': tireNp,
'tireBody': tireBody,
'tireOdeGeom': tireOdeGeom}
if avId <= 0:
tireBlocker = tireNp.find('**/tireblockermesh')
if not tireBlocker.isEmpty():
tireBlocker.hide()
if avId == self.localAvId:
tireNp = self.tireDict[avId]['tireNodePath']
self.treasureSphereName = 'treasureCollider'
self.treasureCollSphere = CollisionSphere(0, 0, 0, IceGameGlobals.TireRadius)
self.treasureCollSphere.setTangible(0)
self.treasureCollNode = CollisionNode(self.treasureSphereName)
self.treasureCollNode.setFromCollideMask(ToontownGlobals.PieBitmask)
self.treasureCollNode.addSolid(self.treasureCollSphere)
self.treasureCollNodePath = tireNp.attachNewNode(self.treasureCollNode)
self.treasureHandler = CollisionHandlerEvent()
self.treasureHandler.addInPattern('%fn-intoTreasure')
base.cTrav.addCollider(self.treasureCollNodePath, self.treasureHandler)
eventName = '%s-intoTreasure' % self.treasureCollNodePath.getName()
self.notify.debug('eventName = %s' % eventName)
self.accept(eventName, self.toonHitSomething)
def setupForceArrow(self, avId):
arrow = loader.loadModel('phase_4/models/minigames/ice_game_arrow')
priority = 0
if avId < 0:
priority = -avId
else:
priority = self.avIdList.index(avId)
if avId == self.localAvId:
priority = 10
self.forceArrowDict[avId] = arrow
def hideForceArrows(self):
for forceArrow in self.forceArrowDict.values():
forceArrow.hide()
def showForceArrows(self, realPlayersOnly = True):
for avId in self.forceArrowDict:
if realPlayersOnly:
if avId > 0:
self.forceArrowDict[avId].show()
else:
self.forceArrowDict[avId].hide()
else:
self.forceArrowDict[avId].show()
def localForceArrow(self):
if self.localAvId in self.forceArrowDict:
return self.forceArrowDict[self.localAvId]
        else:
            return None
def setChoices(self, input0, input1, input2, input3):
pass
def startDebugTask(self):
taskMgr.add(self.debugTask, self.debugTaskName)
def stopDebugTask(self):
taskMgr.remove(self.debugTaskName)
def debugTask(self, task):
        if self.canDrive and localAvatar.doId in self.tireDict:
dt = globalClock.getDt()
forceMove = 25000
forceMoveDt = forceMove
tireBody = self.tireDict[localAvatar.doId]['tireBody']
if self.arrowKeys.upPressed() and not tireBody.isEnabled():
x = 0
y = 1
tireBody.enable()
tireBody.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
if self.arrowKeys.downPressed() and not tireBody.isEnabled():
x = 0
y = -1
tireBody.enable()
tireBody.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
if self.arrowKeys.leftPressed() and not tireBody.isEnabled():
x = -1
y = 0
tireBody.enable()
tireBody.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
if self.arrowKeys.rightPressed() and not tireBody.isEnabled():
x = 1
y = 0
tireBody.enable()
tireBody.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
return task.cont
def __upArrowPressed(self):
pass
def __downArrowPressed(self):
pass
def __leftArrowPressed(self):
pass
def __rightArrowPressed(self):
pass
def __controlPressed(self):
if self.gameFSM.getCurrentState().getName() == 'inputChoice':
self.sendForceArrowUpdateAsap = True
self.updateLocalForceArrow()
self.controlKeyPressed = True
self.sendUpdate('setAvatarChoice', [self.curForce, self.curHeading])
self.gameFSM.request('waitServerChoices')
def startTimer(self):
now = globalClock.getFrameTime()
elapsed = now - self.timerStartTime
self.timer.posInTopRightCorner()
self.timer.setTime(IceGameGlobals.InputTimeout)
self.timer.countdown(IceGameGlobals.InputTimeout - elapsed, self.handleChoiceTimeout)
self.timer.show()
def setTimerStartTime(self, timestamp):
if not self.hasLocalToon:
return
self.timerStartTime = globalClockDelta.networkToLocalTime(timestamp)
if self.timer != None:
self.startTimer()
return
def handleChoiceTimeout(self):
self.sendUpdate('setAvatarChoice', [0, 0])
self.gameFSM.request('waitServerChoices')
def localTireNp(self):
ret = None
if self.localAvId in self.tireDict:
ret = self.tireDict[self.localAvId]['tireNodePath']
return ret
def localTireBody(self):
ret = None
if self.localAvId in self.tireDict:
ret = self.tireDict[self.localAvId]['tireBody']
return ret
def getTireBody(self, avId):
ret = None
if avId in self.tireDict:
ret = self.tireDict[avId]['tireBody']
return ret
def getTireNp(self, avId):
ret = None
if avId in self.tireDict:
ret = self.tireDict[avId]['tireNodePath']
return ret
def updateForceArrow(self, avId, curHeading, curForce):
forceArrow = self.forceArrowDict[avId]
tireNp = self.tireDict[avId]['tireNodePath']
tireNp.setH(curHeading)
tireBody = self.tireDict[avId]['tireBody']
tireBody.setQuaternion(tireNp.getQuat())
self.notify.debug('curHeading = %s' % curHeading)
yScale = curForce / 100.0
yScale *= 1
headY = yScale * 15
xScale = (yScale - 1) / 2.0 + 1.0
shaft = forceArrow.find('**/arrow_shaft')
head = forceArrow.find('**/arrow_head')
shaft.setScale(xScale, yScale, 1)
head.setPos(0, headY, 0)
head.setScale(xScale, xScale, 1)
def updateLocalForceArrow(self):
avId = self.localAvId
self.b_setForceArrowInfo(avId, self.curHeading, self.curForce)
def __aimTask(self, task):
if not hasattr(self, 'arrowKeys'):
return task.done
dt = globalClock.getDt()
headingMomentumChange = dt * 60.0
forceMomentumChange = dt * 160.0
arrowUpdate = False
arrowRotating = False
arrowUp = False
arrowDown = False
if self.arrowKeys.upPressed() and not self.arrowKeys.downPressed():
self.forceMomentum += forceMomentumChange
if self.forceMomentum < 0:
self.forceMomentum = 0
if self.forceMomentum > 50:
self.forceMomentum = 50
oldForce = self.curForce
self.curForce += self.forceMomentum * dt
arrowUpdate = True
if oldForce < self.MaxLocalForce:
arrowUp = True
elif self.arrowKeys.downPressed() and not self.arrowKeys.upPressed():
self.forceMomentum += forceMomentumChange
if self.forceMomentum < 0:
self.forceMomentum = 0
if self.forceMomentum > 50:
self.forceMomentum = 50
oldForce = self.curForce
self.curForce -= self.forceMomentum * dt
arrowUpdate = True
if oldForce > 0.01:
arrowDown = True
else:
self.forceMomentum = 0
if self.arrowKeys.leftPressed() and not self.arrowKeys.rightPressed():
self.headingMomentum += headingMomentumChange
if self.headingMomentum < 0:
self.headingMomentum = 0
if self.headingMomentum > 50:
self.headingMomentum = 50
self.curHeading += self.headingMomentum * dt
arrowUpdate = True
arrowRotating = True
elif self.arrowKeys.rightPressed() and not self.arrowKeys.leftPressed():
self.headingMomentum += headingMomentumChange
if self.headingMomentum < 0:
self.headingMomentum = 0
if self.headingMomentum > 50:
self.headingMomentum = 50
self.curHeading -= self.headingMomentum * dt
arrowUpdate = True
arrowRotating = True
else:
self.headingMomentum = 0
if arrowUpdate:
self.normalizeHeadingAndForce()
self.updateLocalForceArrow()
if arrowRotating:
if not self.arrowRotateSound.status() == self.arrowRotateSound.PLAYING:
base.playSfx(self.arrowRotateSound, looping=True)
else:
self.arrowRotateSound.stop()
if arrowUp:
if not self.arrowUpSound.status() == self.arrowUpSound.PLAYING:
base.playSfx(self.arrowUpSound, looping=False)
else:
self.arrowUpSound.stop()
if arrowDown:
if not self.arrowDownSound.status() == self.arrowDownSound.PLAYING:
base.playSfx(self.arrowDownSound, looping=False)
else:
self.arrowDownSound.stop()
return task.cont
def normalizeHeadingAndForce(self):
if self.curForce > self.MaxLocalForce:
self.curForce = self.MaxLocalForce
if self.curForce < 0.01:
self.curForce = 0.01
def setTireInputs(self, tireInputs):
if not self.hasLocalToon:
return
self.allTireInputs = tireInputs
self.gameFSM.request('moveTires')
def enableAllTireBodies(self):
for avId in self.tireDict.keys():
self.tireDict[avId]['tireBody'].enable()
def disableAllTireBodies(self):
for avId in self.tireDict.keys():
self.tireDict[avId]['tireBody'].disable()
def areAllTiresDisabled(self):
for avId in self.tireDict.keys():
if self.tireDict[avId]['tireBody'].isEnabled():
return False
return True
def __moveTiresTask(self, task):
if self.areAllTiresDisabled():
self.sendTirePositions()
self.gameFSM.request('synch')
return task.done
return task.cont
def sendTirePositions(self):
tirePositions = []
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
tire = self.getTireBody(avId)
pos = Point3(tire.getPosition())
tirePositions.append([pos[0], pos[1], pos[2]])
for index in xrange(len(self.avIdList), 4):
avId = -index
tire = self.getTireBody(avId)
pos = Point3(tire.getPosition())
tirePositions.append([pos[0], pos[1], pos[2]])
self.sendUpdate('endingPositions', [tirePositions])
def setFinalPositions(self, finalPos):
if not self.hasLocalToon:
return
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
tire = self.getTireBody(avId)
np = self.getTireNp(avId)
pos = finalPos[index]
tire.setPosition(pos[0], pos[1], pos[2])
np.setPos(pos[0], pos[1], pos[2])
for index in xrange(len(self.avIdList), 4):
avId = -index
tire = self.getTireBody(avId)
np = self.getTireNp(avId)
pos = finalPos[index]
tire.setPosition(pos[0], pos[1], pos[2])
np.setPos(pos[0], pos[1], pos[2])
def updateInfoLabel(self):
self.infoLabel['text'] = TTLocalizer.IceGameInfo % {'curMatch': self.curMatch + 1,
'numMatch': IceGameGlobals.NumMatches,
'curRound': self.curRound + 1,
'numRound': IceGameGlobals.NumRounds}
def setMatchAndRound(self, match, round):
if not self.hasLocalToon:
return
self.curMatch = match
self.curRound = round
self.updateInfoLabel()
def setScores(self, match, round, scores):
if not self.hasLocalToon:
return
self.newMatch = match
self.newRound = round
self.newScores = scores
def setNewState(self, state):
if not self.hasLocalToon:
return
self.notify.debug('setNewState gameFSM=%s newState=%s' % (self.gameFSM, state))
self.gameFSM.request(state)
def putAllTiresInStartingPositions(self):
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
np = self.tireDict[avId]['tireNodePath']
np.setPos(IceGameGlobals.StartingPositions[index])
            self.notify.debug('avId=%s newPos=%s' % (avId, np.getPos()))
np.setHpr(0, 0, 0)
quat = np.getQuat()
body = self.tireDict[avId]['tireBody']
body.setPosition(IceGameGlobals.StartingPositions[index])
body.setQuaternion(quat)
for index in xrange(len(self.avIdList), 4):
avId = -index
np = self.tireDict[avId]['tireNodePath']
np.setPos(IceGameGlobals.StartingPositions[index])
            self.notify.debug('avId=%s newPos=%s' % (avId, np.getPos()))
np.setHpr(0, 0, 0)
quat = np.getQuat()
body = self.tireDict[avId]['tireBody']
body.setPosition(IceGameGlobals.StartingPositions[index])
body.setQuaternion(quat)
def b_setForceArrowInfo(self, avId, force, heading):
self.setForceArrowInfo(avId, force, heading)
self.d_setForceArrowInfo(avId, force, heading)
def d_setForceArrowInfo(self, avId, force, heading):
sendIt = False
curTime = self.getCurrentGameTime()
if self.sendForceArrowUpdateAsap:
sendIt = True
elif curTime - self.lastForceArrowUpdateTime > 0.2:
sendIt = True
if sendIt:
self.sendUpdate('setForceArrowInfo', [avId, force, heading])
self.sendForceArrowUpdateAsap = False
self.lastForceArrowUpdateTime = self.getCurrentGameTime()
def setForceArrowInfo(self, avId, force, heading):
if not self.hasLocalToon:
return
self.updateForceArrow(avId, force, heading)
def setupStartOfMatch(self):
self.putAllTiresInStartingPositions()
szId = self.getSafezoneId()
self.numTreasures = IceGameGlobals.NumTreasures[szId]
if self.treasures:
for treasure in self.treasures:
treasure.destroy()
self.treasures = []
index = 0
treasureMargin = IceGameGlobals.TireRadius + 1.0
while len(self.treasures) < self.numTreasures:
xPos = self.randomNumGen.randrange(IceGameGlobals.MinWall[0] + 5, IceGameGlobals.MaxWall[0] - 5)
yPos = self.randomNumGen.randrange(IceGameGlobals.MinWall[1] + 5, IceGameGlobals.MaxWall[1] - 5)
self.notify.debug('yPos=%s' % yPos)
pos = Point3(xPos, yPos, IceGameGlobals.TireRadius)
newTreasure = IceTreasure.IceTreasure(self.treasureModel, pos, index, self.doId, penalty=False)
goodSpot = True
for obstacle in self.obstacles:
if newTreasure.nodePath.getDistance(obstacle) < treasureMargin:
goodSpot = False
break
if goodSpot:
for treasure in self.treasures:
if newTreasure.nodePath.getDistance(treasure.nodePath) < treasureMargin:
goodSpot = False
break
if goodSpot:
self.treasures.append(newTreasure)
index += 1
else:
newTreasure.destroy()
self.numPenalties = IceGameGlobals.NumPenalties[szId]
if self.penalties:
for penalty in self.penalties:
penalty.destroy()
self.penalties = []
index = 0
while len(self.penalties) < self.numPenalties:
xPos = self.randomNumGen.randrange(IceGameGlobals.MinWall[0] + 5, IceGameGlobals.MaxWall[0] - 5)
yPos = self.randomNumGen.randrange(IceGameGlobals.MinWall[1] + 5, IceGameGlobals.MaxWall[1] - 5)
self.notify.debug('yPos=%s' % yPos)
pos = Point3(xPos, yPos, IceGameGlobals.TireRadius)
newPenalty = IceTreasure.IceTreasure(self.penaltyModel, pos, index, self.doId, penalty=True)
goodSpot = True
for obstacle in self.obstacles:
if newPenalty.nodePath.getDistance(obstacle) < treasureMargin:
goodSpot = False
break
if goodSpot:
for treasure in self.treasures:
if newPenalty.nodePath.getDistance(treasure.nodePath) < treasureMargin:
goodSpot = False
break
if goodSpot:
for penalty in self.penalties:
if newPenalty.nodePath.getDistance(penalty.nodePath) < treasureMargin:
goodSpot = False
break
if goodSpot:
self.penalties.append(newPenalty)
index += 1
else:
newPenalty.destroy()
def toonHitSomething(self, entry):
self.notify.debug('---- treasure Enter ---- ')
self.notify.debug('%s' % entry)
name = entry.getIntoNodePath().getName()
parts = name.split('-')
if len(parts) < 3:
self.notify.debug('collided with %s, but returning' % name)
return
if not int(parts[1]) == self.doId:
self.notify.debug("collided with %s, but doId doesn't match" % name)
return
treasureNum = int(parts[2])
if 'penalty' in parts[0]:
self.__penaltyGrabbed(treasureNum)
else:
self.__treasureGrabbed(treasureNum)
def __treasureGrabbed(self, treasureNum):
self.treasures[treasureNum].showGrab()
self.treasureGrabSound.play()
self.sendUpdate('claimTreasure', [treasureNum])
def setTreasureGrabbed(self, avId, treasureNum):
if not self.hasLocalToon:
return
self.notify.debug('treasure %s grabbed by %s' % (treasureNum, avId))
if avId != self.localAvId:
self.treasures[treasureNum].showGrab()
i = self.avIdList.index(avId)
self.scores[i] += 1
self.scorePanels[i].setScore(self.scores[i])
def __penaltyGrabbed(self, penaltyNum):
self.penalties[penaltyNum].showGrab()
self.sendUpdate('claimPenalty', [penaltyNum])
def setPenaltyGrabbed(self, avId, penaltyNum):
if not self.hasLocalToon:
return
self.notify.debug('penalty %s grabbed by %s' % (penaltyNum, avId))
if avId != self.localAvId:
self.penalties[penaltyNum].showGrab()
i = self.avIdList.index(avId)
self.scores[i] -= 1
self.scorePanels[i].setScore(self.scores[i])
def postStep(self):
DistributedIceWorld.DistributedIceWorld.postStep(self)
for count in range(self.colCount):
c0, c1 = self.getOrderedContacts(count)
if c1 in self.tireCollideIds:
tireIndex = self.tireCollideIds.index(c1)
if c0 in self.tireCollideIds:
self.tireSounds[tireIndex]['tireHit'].play()
elif c0 == self.wallCollideId:
self.tireSounds[tireIndex]['wallHit'].play()
elif c0 == self.obstacleCollideId:
self.tireSounds[tireIndex]['obstacleHit'].play()
def forceLocalToonToTire(self):
toon = localAvatar
if toon and self.localAvId in self.tireDict:
tireNp = self.tireDict[self.localAvId]['tireNodePath']
toon.reparentTo(tireNp)
toon.setPosHpr(0, 0, 0, 0, 0, 0)
toon.setY(1.0)
toon.setZ(-3)
|
|
# coding: utf-8
"""
Deals with models for deepseg module. Available models are listed under MODELS.
"""
import os
import json
import logging
import colored
import spinalcordtoolbox as sct
import spinalcordtoolbox.download
logger = logging.getLogger(__name__)
# List of models. The convention for model names is: (species)_(university)_(contrast)_region
# Regions could be: sc, gm, lesion, tumor
MODELS = {
"t2star_sc": {
"url": [
"https://github.com/ivadomed/t2star_sc/releases/download/r20200622/r20200622_t2star_sc.zip",
"https://osf.io/v9hs8/download?version=5",
],
"description": "Cord segmentation model on T2*-weighted contrast.",
"contrasts": ["t2star"],
"default": True,
},
"mice_uqueensland_sc": {
"url": [
"https://github.com/ivadomed/mice_uqueensland_sc/releases/download/r20200622/r20200622_mice_uqueensland_sc.zip",
"https://osf.io/nu3ma/download?version=6",
],
"description": "Cord segmentation model on mouse MRI. Data from University of Queensland.",
"contrasts": ["t1"],
"default": False,
},
"mice_uqueensland_gm": {
"url": [
"https://github.com/ivadomed/mice_uqueensland_gm/releases/download/r20200622/r20200622_mice_uqueensland_gm.zip",
"https://osf.io/mfxwg/download?version=6",
],
"description": "Gray matter segmentation model on mouse MRI. Data from University of Queensland.",
"contrasts": ["t1"],
"default": False,
},
"t2_tumor": {
"url": [
"https://github.com/ivadomed/t2_tumor/archive/r20201215.zip"
],
"description": "Cord tumor segmentation model, trained on T2-weighted contrast.",
"contrasts": ["t2"],
"default": False,
},
"findcord_tumor": {
"url": [
"https://github.com/ivadomed/findcord_tumor/archive/r20201215.zip"
],
"description": "Cord localisation model, trained on T2-weighted images with tumor.",
"contrasts": ["t2"],
"default": False,
},
"model_seg_sctumor-edema-cavity_t2-t1_unet3d-multichannel": {
"url": [
"https://github.com/ivadomed/model_seg_sctumor-edema-cavity_t2-t1_unet3d-multichannel/archive/r20201215.zip"
],
"description": "Multiclass cord tumor segmentation model.",
"contrasts": ["t2", "t1"],
"default": False,
},
"model_seg_exvivo_gm-wm_t2_unet2d-multichannel-softseg": {
"url": [
"https://github.com/ivadomed/model_seg_exvivo_gm-wm_t2_unet2d-multichannel-softseg/archive/r20210401_v2.zip"
],
"description": "Grey/white matter seg on exvivo human T2w.",
"contrasts": ["t2"],
"default": False,
}
}
# List of task. The convention for task names is: action_(animal)_region_(contrast)
# Regions could be: sc, gm, lesion, tumor
TASKS = {
'seg_sc_t2star':
{'description': 'Cord segmentation on T2*-weighted contrast.',
'models': ['t2star_sc']},
'seg_mice_sc':
{'description': 'Cord segmentation on mouse MRI.',
'models': ['mice_uqueensland_sc']},
'seg_mice_gm':
{'description': 'Gray matter segmentation on mouse MRI.',
'models': ['mice_uqueensland_gm']},
'seg_tumor_t2':
{'description': 'Cord tumor segmentation on T2-weighted contrast.',
'models': ['findcord_tumor', 't2_tumor']},
'seg_tumor-edema-cavity_t1-t2':
{'description': 'Multiclass cord tumor segmentation.',
'models': ['findcord_tumor', 'model_seg_sctumor-edema-cavity_t2-t1_unet3d-multichannel']},
'seg_exvivo_gm-wm_t2':
{'description': 'Grey/white matter seg on exvivo human T2w.',
'models': ['model_seg_exvivo_gm-wm_t2_unet2d-multichannel-softseg']}
}
def get_required_contrasts(task):
"""
    Get required contrasts according to the models used by a task.
    :param task: str: Name of the task.
    :return: list: List of required contrasts
"""
contrasts_required = set()
for model in TASKS[task]['models']:
for contrast in MODELS[model]['contrasts']:
contrasts_required.add(contrast)
return list(contrasts_required)
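# Illustrative usage sketch (not part of the original module); names are taken
# from the TASKS/MODELS dicts above:
#
#   TASKS['seg_tumor_t2']['models']          # ['findcord_tumor', 't2_tumor']
#   get_required_contrasts('seg_tumor_t2')   # ['t2']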
def folder(name_model):
"""
    Return the absolute path of a deep learning model.
    :param name_model: str: Name of the model.
    :return: str: Folder of the model.
"""
return os.path.join(sct.__deepseg_dir__, name_model)
def install_model(name_model):
"""
Download and install specified model under SCT installation dir.
    :param name_model: str: Name of the model.
:return: None
"""
logger.info("\nINSTALLING MODEL: {}".format(name_model))
sct.download.install_data(MODELS[name_model]['url'], folder(name_model))
def install_default_models():
"""
Download all default models and install them under SCT installation dir.
:return: None
"""
for name_model, value in MODELS.items():
if value['default']:
install_model(name_model)
def is_valid(path_model):
"""
    Check if the model has the necessary files and follows the naming conventions:
- Folder should have the same name as the enclosed files.
:param path_model: str: Absolute path to folder that encloses the model files.
"""
name_model = path_model.rstrip(os.sep).split(os.sep)[-1]
return (os.path.exists(os.path.join(path_model, name_model + '.pt')) or
os.path.exists(os.path.join(path_model, name_model + '.onnx'))) and os.path.exists(
os.path.join(path_model, name_model + '.json'))
def list_tasks():
"""
    Return the available tasks with their descriptions.
    :return: dict: Available tasks.
"""
return {name: value for name, value in TASKS.items()}
def display_list_tasks():
tasks = sct.deepseg.models.list_tasks()
# Display beautiful output
color = {True: 'green', False: 'red'}
print("{:<30s}{:<50s}{:<20s}MODELS".format("TASK", "DESCRIPTION", "INPUT CONTRASTS"))
print("-" * 120)
for name_task, value in tasks.items():
path_models = [sct.deepseg.models.folder(name_model) for name_model in value['models']]
are_models_valid = [sct.deepseg.models.is_valid(path_model) for path_model in path_models]
task_status = colored.stylize(name_task.ljust(30),
colored.fg(color[all(are_models_valid)]))
description_status = colored.stylize(value['description'].ljust(50),
colored.fg(color[all(are_models_valid)]))
models_status = ', '.join([colored.stylize(model_name,
colored.fg(color[is_valid]))
for model_name, is_valid in zip(value['models'], are_models_valid)])
input_contrasts = colored.stylize(str(', '.join(model_name for model_name in
get_required_contrasts(name_task))).ljust(20),
colored.fg(color[all(are_models_valid)]))
print("{}{}{}{}".format(task_status, description_status, input_contrasts, models_status))
print(
'\nLegend: {} | {}\n'.format(
colored.stylize("installed", colored.fg(color[True])),
colored.stylize("not installed", colored.fg(color[False]))))
exit(0)
def get_metadata(folder_model):
"""
Get metadata from json file located in folder_model
    :param folder_model: str: Model folder
:return: dict
"""
fname_metadata = os.path.join(folder_model, os.path.basename(folder_model) + '.json')
with open(fname_metadata, "r") as fhandle:
metadata = json.load(fhandle)
return metadata
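# Minimal end-to-end sketch (hypothetical usage, not part of the original module):
#
#   install_model('t2star_sc')
#   path_model = folder('t2star_sc')
#   if is_valid(path_model):
#       metadata = get_metadata(path_model)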
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Noder defines Child ordered dicts with <DoStr> as KeyStr.
The items inside are automatically set with Noded<DoStr><TypeStr> and have
a Pointer to the parent InstanceVariable. This is the starting point for building highly
arborescent and (possibly circular) structures of objects.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Applyiers.Filterer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
from ShareYourSystem.Standards.Itemizers import Pather
#</ImportSpecificModules>
#<DefineLocals>
NodingPrefixGetStr='<'
NodingSuffixGetStr='>'
NodingCollectionPrefixStr="Node"
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class NoderClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'NodingCollectionStr',
'NodedCollectionOrderedDict',
'NodedCollectionStr',
'NodedKeyStr',
'NodedCollectionIndexInt'
]
def default_init(self,
_NodingCollectionStr="",
_NodedCollectionOrderedDict=None,
_NodedCollectionStr="",
_NodedKeyStr="",
_NodedCollectionIndexInt=-1,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
#global
global NodingCollectionPrefixStr
NodedMethodStr='__setattr__'
NodedMethod=getattr(self,NodedMethodStr)
#Int and Set Child attributes
NodedMethod(
NodingCollectionPrefixStr+'CollectionStr',
"Global"
)
NodedMethod(
NodingCollectionPrefixStr+'IndexInt',
-1
)
"""
NodedMethod(
NodingCollectionPrefixStr+'KeyStr',
SYS._filter(
lambda __ListedVariable:
id(__ListedVariable)==self.IdStr,
sys.modules['__main__'].globals().values()
)
)
"""
NodedMethod(
NodingCollectionPrefixStr+'KeyStr',
""
)
self.point(
None,
NodingCollectionPrefixStr+'PointOrderedDict'
)
self.point(
None,
NodingCollectionPrefixStr+'PointDeriveNoder'
)
def do_node(self):
#debug
'''
self.debug(("self.",self,['NodingCollectionStr']))
'''
#Get the NodedStr
if self.NodingCollectionStr!="":
#set the Noded OrderedDict and KeyStr
NodedCollectionOrderedDictKeyStr=self.NodingCollectionStr+'CollectionOrderedDict'
#self.NodeKeyStrKeyStr=self.NodedPrefixStr+'KeyStr'
try:
self.NodedCollectionOrderedDict=getattr(self,NodedCollectionOrderedDictKeyStr)
except AttributeError:
self.__setattr__(
NodedCollectionOrderedDictKeyStr,
collections.OrderedDict()
)
self.NodedCollectionOrderedDict=getattr(self,NodedCollectionOrderedDictKeyStr)
'''
try:
self.NodeKeyStr=getattr(self,self.NodeKeyStrKeyStr)
except AttributeError:
self.__setattr__(self.NodeKeyStrKeyStr,"")
self.NodeKeyStr=getattr(self,self.NodeKeyStrKeyStr)
'''
#debug
'''
self.debug(('self.',self,['NodedPrefixStr','NodedCollectionOrderedDict',]))
'''
"""
#If this is a set of a tree of nodes then also init the nodifying attributes
#if 'IsNoderBool' not in _KwargVariablesDict or _KwargVariablesDict['IsNoderBool']:
#NodePointDeriveNoderKeyStr=self.NodedPrefixStr+'ParentPointer'
#NodedIndexIntKeyStr=self.NodedPrefixStr+'Int'
#NodedPathStrKeyStr=self.NodedPrefixStr+'PathStr'
#NodedGrandParentPointersListKeyStr=self.NodedPrefixStr+'GrandParentPointersList'
#try:
# self.NodedIndexInt=getattr(self,NodedIndexIntKeyStr)
#except AttributeError:
# self.__setattr__(NodedIndexIntKeyStr,-1)
# self.NodedIndexInt=getattr(self,NodedIndexIntKeyStr)
#try:
# self.NodePointDeriveNoder=getattr(self,NodePointDeriveNoderKeyStr)
#except AttributeError:
# self.__setattr__(NodePointDeriveNoderKeyStr,None)
# self.NodePointDeriveNoder=getattr(self,NodePointDeriveNoderKeyStr)
#debug
'''
self.debug(
[
('vars ',vars(),['NodePointDeriveNoderKeyStr']),
('self.',self,[NodePointDeriveNoderKeyStr])
]
)
'''
"""
#Return self
#return self
#<Hook>@Hooker.HookerClass(**{'HookingAfterVariablesList':[BaseClass.get]})
#@Imitater.ImitaterClass()
def mimic_get(self):
#debug
'''
self.debug(("self.",self,['GettingKeyVariable']))
'''
#Definition
OutputDict={'HookingIsBool':True}
#Appending set
if self.GettingKeyVariable.startswith(NodingPrefixGetStr):
#Definition the SplittedStrsList
SplittedStrsList=self.GettingKeyVariable.split(NodingSuffixGetStr)
#Definition the NodingCollectionStr
NodingCollectionStr=NodingPrefixGetStr.join(
SplittedStrsList[0].split(NodingPrefixGetStr)[1:])
#debug
'''
self.debug(
[
'NodingCollectionStr is '+NodingCollectionStr,
'We are going to node'
]
)
'''
#Nodify
self.node(
NodingCollectionStr,
#**{'IsNoderBool':False}
)
#Definition of the KeyStr
GetKeyStr=NodingSuffixGetStr.join(SplittedStrsList[1:])
#debug
'''
self.debug(
[
'node is done',
'GetKeyStr is '+GetKeyStr,
'self.NodedCollectionOrderedDict is '+str(self.NodedCollectionOrderedDict)
]
)
'''
#Get with a digited KeyStr case
if GetKeyStr.isdigit():
#Definition the GetInt
GetInt=(int)(GetKeyStr)
#Check if the size is ok
if GetInt<len(self.NodedCollectionOrderedDict):
#Get the GettedVariable
self.GettedValueVariable=SYS.get(
self.NodedCollectionOrderedDict,
'values',
GetInt
)
#Return
OutputDict['HookingIsBool']=False
#<Hook>return OutputDict
#Get in the ValueVariablesList
elif GetKeyStr=="":
#Get the GettedVariable
self.GettedValueVariable=self.NodedCollectionOrderedDict.values()
#Return
OutputDict['HookingIsBool']=False
#<Hook>return OutputDict
elif GetKeyStr in self.NodedCollectionOrderedDict:
#Get the GettedVariable
self.GettedValueVariable=self.NodedCollectionOrderedDict[GetKeyStr]
#Return
OutputDict['HookingIsBool']=False
#<Hook>return OutputDict
#Call the parent get method
if OutputDict['HookingIsBool']:
#debug
'''
self.debug(
[
('self.',self,['GettingKeyVariable']),
'BaseClass.get is '+str(BaseClass.get)
]
)
'''
#Call
return BaseClass.get(self)
else:
#return
return OutputDict
#<Hook>@Hooker.HookerClass(**{'HookingAfterVariablesList':[BaseClass.set]})
#@Imitater.ImitaterClass()
def mimic_set(self):
""" """
#debug
'''
self.debug(('self.',self,[
'SettingKeyVariable',
#'SettingValueVariable'
]))
'''
#Definition
OutputDict={'HookingIsBool':True}
#Appending set
if self.SettingKeyVariable.startswith(NodingPrefixGetStr):
#Definition the SplittedStrsList
SplittedStrsList=self.SettingKeyVariable.split(NodingSuffixGetStr)
#Definition the NodingCollectionStr
NodingCollectionStr=NodingPrefixGetStr.join(
SplittedStrsList[0].split(NodingPrefixGetStr)[1:])
#Check if it is an append of Nodes
IsNoderBool='NoderClass' in map(
lambda __Class:
__Class.__name__,
type(self.SettingValueVariable).__mro__
)
#debug
'''
self.debug(('vars ',vars(),['NodingCollectionStr','IsNoderBool']))
'''
#Nodify
self.node(
NodingCollectionStr,
#**{'IsNoderBool':IsNoderBool}
)
#Definition the KeyStr
SetKeyStr=NodingSuffixGetStr.join(SplittedStrsList[1:])
#debug
'''
self.debug('SetKeyStr is '+SetKeyStr)
'''
#Append (or set if it is already in)
Pather.setWithPathVariableAndKeyVariable(
self.NodedCollectionOrderedDict,
Pather.PathPrefixStr+SetKeyStr,
self.SettingValueVariable
)
if Pather.PathPrefixStr not in SetKeyStr:
#debug
'''
self.debug(('self.',self,['SettingValueVariable']))
'''
#If it is an object
if IsNoderBool:
#global
global NodingCollectionPrefixStr
NodedMethodStr='__setattr__'
NodedMethod=getattr(self.SettingValueVariable,NodedMethodStr)
#Int and Set Child attributes
NodedMethod(
NodingCollectionPrefixStr+'CollectionStr',
self.NodingCollectionStr
)
NodedMethod(
NodingCollectionPrefixStr+'IndexInt',
len(self.NodedCollectionOrderedDict)-1
)
NodedMethod(
NodingCollectionPrefixStr+'KeyStr',
SetKeyStr
)
self.SettingValueVariable.point(
self.NodedCollectionOrderedDict,
NodingCollectionPrefixStr+'PointOrderedDict'
)
self.SettingValueVariable.point(
self,
NodingCollectionPrefixStr+'PointDeriveNoder'
)
#Return
OutputDict['HookingIsBool']=False
#<Hook>return OutputDict
return OutputDict
#Call the parent get method
if OutputDict['HookingIsBool']:
return BaseClass.set(self)
#</DefineClass>
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument parsers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict(),
      action=arg_parsers.FloatingListValuesCatcher())
parser.add_argument(
'--delay',
default='5s',
      type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s'))
parser.add_argument(
'--disk-size',
default='10GB',
      type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB'))
# will emit a warning about space-separated metadata
res = parser.parse_args(
      '--metadata x=y,a=b c=d --delay 1s --disk-size 10GB'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
import argparse
import datetime
import re
from googlecloudsdk.core import log
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
"""Exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
"""Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
"""Raised when there is a problem with user input.
argparse.ArgumentError takes both the action and a message as constructor
parameters.
"""
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
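# Examples of the messages produced (sketch):
#   _GenerateErrorMessage('bad value')                                -> 'bad value'
#   _GenerateErrorMessage('bad value', user_input='5x')               -> 'bad value; received: 5x'
#   _GenerateErrorMessage('bad value', user_input='5x', error_idx=1)  -> 'bad value at index 1: 5x'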
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<unit>[a-zA-Z]+))? # Optional unit.
$ # End of input marker.
"""
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'B': 1,
'KB': 1 << 10,
'MB': 1 << 20,
'GB': 1 << 30,
'TB': 1 << 40,
'PB': 1 << 50,
'KiB': 1 << 10,
'MiB': 1 << 20,
'GiB': 1 << 30,
'TiB': 1 << 40,
'PiB': 1 << 50,
}
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None):
"""A helper that returns a function that can parse values with units.
Casing for all units matters.
Args:
scales: {str: int}, A dictionary mapping units to their magnitudes in
relation to the lowest magnitude unit in the dict.
default_unit: str, The default unit to use if the user's input is
      missing a unit.
lower_bound: str, An inclusive lower bound.
upper_bound: str, An inclusive upper bound.
Returns:
A function that can parse values.
"""
def UnitsByMagnitude():
"""Returns a list of the units in scales sorted by magnitude."""
return [key for key, _
in sorted(scales.iteritems(), key=lambda value: value[1])]
def Parse(value):
"""Parses value that can contain a unit."""
match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'given value must be of the form INTEGER[UNIT] where units '
'can be one of {0}'
.format(', '.join(UnitsByMagnitude())),
user_input=value))
amount = int(match.group('amount'))
unit = match.group('unit')
if unit is None:
return amount * scales[default_unit]
elif unit in scales:
return amount * scales[unit]
else:
raise ArgumentTypeError(_GenerateErrorMessage(
'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
user_input=unit))
if lower_bound is None:
parsed_lower_bound = None
else:
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
else:
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
else:
return parsed_value
return ParseWithBoundsChecking
def Duration(lower_bound=None, upper_bound=None):
"""Returns a function that can parse time durations.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "s", "m", "h", and
"d" for seconds, seconds, minutes, hours, and days,
respectively. The casing of the units matters.
If the unit is omitted, seconds is assumed.
  The result is the duration in seconds. For example:
parser = Duration()
assert parser('10s') == 10
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single time duration as input to be
parsed.
"""
return _ValueParser(_DURATION_SCALES, default_unit='s',
lower_bound=lower_bound, upper_bound=upper_bound)
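# Usage sketch (values follow the duration scales defined above):
#   parse_delay = Duration(lower_bound='1s', upper_bound='1d')
#   parse_delay('10s')  -> 10
#   parse_delay('2m')   -> 120
#   parse_delay('1h')   -> 3600
#   parse_delay('90')   -> 90     (default unit is seconds)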
def BinarySize(lower_bound=None, upper_bound=None):
"""Returns a function that can parse binary sizes.
Binary sizes are defined as base-2 values representing number of
bytes.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "B", "KB", "MB",
"GB", "TB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
omitted, GB is assumed.
  The result is the size in bytes. For example:
    parser = BinarySize()
    assert parser('10GB') == 10737418240
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single binary size as input to be
parsed.
"""
return _ValueParser(_BINARY_SIZE_SCALES, default_unit='GB',
lower_bound=lower_bound, upper_bound=upper_bound)
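# Usage sketch (values follow the binary size scales defined above; casing matters):
#   parse_size = BinarySize()
#   parse_size('10GB')  -> 10737418240   (10 * 2**30)
#   parse_size('1MiB')  -> 1048576
#   parse_size('10')    -> 10737418240   (default unit is GB)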
_KV_PAIR_DELIMITER = '='
class HostPort(object):
"""A class for holding host and port information."""
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s):
"""Parse the given string into a HostPort object.
This can be used as an argparse type.
Args:
s: str, The string to parse.
Raises:
ArgumentTypeError: If the string is not valid.
Returns:
HostPort, The parsed object.
"""
if not s:
return HostPort(None, None)
if ':' not in s:
return HostPort(s, None)
parts = s.split(':')
if len(parts) > 2:
raise ArgumentTypeError(
_GenerateErrorMessage('Failed to parse host and port', user_input=s))
return HostPort(parts[0] or None, parts[1] or None)
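# Parsing behaviour sketch:
#   HostPort.Parse('example.com:8080')  -> host='example.com', port='8080'
#   HostPort.Parse('example.com')       -> host='example.com', port=None
#   HostPort.Parse(':8080')             -> host=None, port='8080'
#   HostPort.Parse('')                  -> host=None, port=None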
class Day(object):
"""A class for parsing a datetime object for a specific day."""
@staticmethod
def Parse(s):
if not s:
return None
try:
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage(
"Failed to parse date. Value should be in the form 'YYYY-MM-DD",
user_input=s))
class Datetime(object):
"""A class for parsing a datetime object in UTC timezone."""
@staticmethod
def Parse(s):
"""Parses a string value into a Datetime object."""
if not s:
return None
accepted_formats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ')
# TODO(user): Add timezone support.
for date_format in accepted_formats:
try:
return datetime.datetime.strptime(s, date_format)
except ValueError:
pass
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date. Value should be in ISO or RFC3339 format',
user_input=s))
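# Accepted formats, per Day.Parse and Datetime.Parse above (sketch):
#   Day.Parse('2014-01-02')                 -> datetime.date(2014, 1, 2)
#   Datetime.Parse('2014-01-02 10:20:30')   -> datetime.datetime(2014, 1, 2, 10, 20, 30)
#   Datetime.Parse('2014-01-02T10:20:30Z')  -> datetime.datetime(2014, 1, 2, 10, 20, 30)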
def BoundedInt(lower_bound=None, upper_bound=None):
"""Returns a function that can parse integers within some bound."""
def _Parse(value):
"""Parses value as an int, raising ArgumentTypeError if out of bounds."""
v = int(value)
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return _Parse
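# Usage sketch (hypothetical flag, not from the original module):
#   port_type = BoundedInt(lower_bound=1, upper_bound=65535)
#   port_type('8080')   -> 8080
#   port_type('99999')  -> raises ArgumentTypeError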
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
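# Tokenization behaviour sketch:
#   _TokenizeQuotedList('a,b,c')  -> ['a', 'b', 'c']
#   _TokenizeQuotedList('a,b,')   -> ['a', 'b']   (trailing delimiter dropped)
#   _TokenizeQuotedList('')       -> []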
class ArgType(object):
"""Base class for arg types."""
class ArgList(ArgType):
"""Interpret an argument value as a list.
Intended to be used as the type= for a flag argument. Splits the string on
commas or another delimiter and returns a list.
By default, splits on commas:
'a,b,c' -> ['a', 'b', 'c']
There is an available syntax for using an alternate delimiter:
'^:^a,b:c' -> ['a,b', 'c']
'^::^a:b::c' -> ['a:b', 'c']
'^,^^a^,b,c' -> ['^a^', ',b', 'c']
"""
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self, element_type=None, min_length=0, max_length=None,
choices=None):
"""Initialize an ArgList.
Args:
element_type: (str)->str, A function to apply to each of the list items.
min_length: int, The minimum size of the list.
max_length: int, The maximum size of the list.
choices: [element_type], a list of valid possibilities for elements. If
None, then no constraints are imposed.
Returns:
(str)->[str], A function to parse the list of values in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
"""
self.element_type = element_type
if choices:
def ChoiceType(raw_value):
if element_type:
typed_value = element_type(raw_value)
else:
typed_value = raw_value
if typed_value not in choices:
raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
value=typed_value, choices=', '.join(
[str(choice) for choice in choices])))
return typed_value
self.element_type = ChoiceType
self.min_length = min_length
self.max_length = max_length
def __call__(self, arg_value): # pylint:disable=missing-docstring
delim = self.DEFAULT_DELIM_CHAR
if (arg_value.startswith(self.ALT_DELIM_CHAR) and
self.ALT_DELIM_CHAR in arg_value[1:]):
delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
if not delim:
raise ArgumentTypeError(
'Invalid delimiter. Please see `gcloud topic escaping` for '
'information on escaping list or dictionary flag values.')
arg_list = _TokenizeQuotedList(arg_value, delim=delim)
# TODO(user): These exceptions won't present well to the user.
if len(arg_list) < self.min_length:
raise ArgumentTypeError('not enough args')
if self.max_length is not None and len(arg_list) > self.max_length:
raise ArgumentTypeError('too many args')
if self.element_type:
arg_list = [self.element_type(arg) for arg in arg_list]
return arg_list
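# Behaviour sketch for ArgList (mirrors the class docstring above):
#   ArgList()('a,b,c')                  -> ['a', 'b', 'c']
#   ArgList()('^:^a,b:c')               -> ['a,b', 'c']   (alternate delimiter)
#   ArgList(element_type=int)('1,2,3')  -> [1, 2, 3]
#   ArgList(choices=['x', 'y'])('x,z')  -> raises ArgumentTypeError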
class ArgDict(ArgList):
"""Interpret an argument value as a dict.
Intended to be used as the type= for a flag argument. Splits the string on
commas to get a list, and then splits the items on equals to get a set of
key-value pairs to get a dict.
"""
def __init__(self, value_type=None, spec=None, min_length=0, max_length=None):
"""Initialize an ArgDict.
Args:
value_type: (str)->str, A function to apply to each of the dict values.
spec: {str: (str)->str}, A mapping of expected keys to functions.
The functions are applied to the values. If None, an arbitrary
set of keys will be accepted. If not None, it is an error for the
user to supply a key that is not in the spec.
min_length: int, The minimum number of keys in the dict.
max_length: int, The maximum number of keys in the dict.
Returns:
(str)->{str:str}, A function to parse the dict in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
ValueError: If both value_type and spec are provided.
"""
super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
if spec and value_type:
raise ValueError('cannot have both spec and sub_type')
self.value_type = value_type
self.spec = spec
def _ApplySpec(self, key, value):
if key in self.spec:
return self.spec[key](value)
else:
raise ArgumentTypeError(
_GenerateErrorMessage(
'valid keys are {0}'.format(
', '.join(sorted(self.spec.keys()))),
user_input=key))
def __call__(self, arg_value): # pylint:disable=missing-docstring
arg_list = super(ArgDict, self).__call__(arg_value)
arg_dict = {}
for arg in arg_list:
split_arg = arg.split('=', 1) # only use the first =
# TODO(user): These exceptions won't present well to the user.
if len(split_arg) != 2:
raise ArgumentTypeError(
('Bad syntax for dict arg: {0}. Please see `gcloud topic escaping` '
'if you would like information on escaping list or dictionary '
'flag values.').format(repr(arg)))
key, value = split_arg
if not key:
raise ArgumentTypeError('bad key for dict arg: '+repr(arg))
if self.value_type:
value = self.value_type(value)
if self.spec:
value = self._ApplySpec(key, value)
arg_dict[key] = value
return arg_dict
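# Behaviour sketch for ArgDict:
#   ArgDict()('x=y,a=b')                -> {'x': 'y', 'a': 'b'}
#   ArgDict(value_type=int)('a=1,b=2')  -> {'a': 1, 'b': 2}
#   ArgDict(spec={'n': int})('n=3')     -> {'n': 3}
#   ArgDict(spec={'n': int})('m=3')     -> raises ArgumentTypeError (key not in spec)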
# pylint:disable=protected-access
def FloatingListValuesCatcher(
action=argparse._StoreAction, switch_value=None):
"""Create an action for catching floating list values.
Args:
action: argparse.Action, the superclass of the new action.
switch_value: obj, If not none, allow users to specify no value for the
flag. If the flag is given and no value is specified, the switch_value
will be used instead.
Returns:
argparse.Action, an action that will catch list values separated by spaces.
"""
class FloatingListValuesCatcherAction(action):
"""This is to assist with refactoring argument lists.
    Provides an error for users who type (or have a script that specifies) a list
    with the elements in different arguments, e.g.
$ gcloud sql instances create foo --authorized-networks x y
usage: gcloud sql instances create INSTANCE [optional flags]
ERROR: (gcloud.sql.instances.create) argument --authorized-networks: lists
are separated by commas, try "--authorized-networks=x,y"
To do this, with flags that used to (but no longer) have nargs set to take
multiple values we apply an action designed to catch them by transparently
setting nargs to '+', and then making sure only 1 value is provided.
As a caveat, this means that people still cannot put positional arguments
after the flags. So, this is a temporary mechanism designed to inform users,
and we'll remove it eventually.
"""
def __init__(self, *args, **kwargs):
if 'nargs' in kwargs:
# Make sure nothing weird is happening, first. This action is intended
# only for use with --flags that have the type as ArgList or ArgDict,
# and do not set nargs at all.
raise ValueError(
'trying to catch floating lists for a misspecified flag list')
if switch_value is not None:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
super(FloatingListValuesCatcherAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if not values and switch_value is not None:
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, switch_value, option_string=option_string)
return
if len(values) > 1:
class ArgShell(object):
"""Class designed to trick argparse into displaying a nice error."""
def __init__(self, name):
self.option_strings = [name]
suggestions = []
if values and isinstance(values[0], dict):
aggregate_value = {}
for valdict in values:
aggregate_value.update(valdict)
suggestions.extend(
['%s=%s' % (k, v) for k, v in valdict.iteritems()])
if values and isinstance(values[0], list):
aggregate_value = []
suggestions.extend(
[','.join(map(str, vallist)) for vallist in values])
for vallist in values:
aggregate_value.extend(vallist)
extras = suggestions[1:]
msg = (
'We noticed that you are using space-separated lists, which are '
'deprecated. '
'Please transition to using comma-separated lists instead '
'(try "{flag} {values}"). '
'If you intend to use [{extras}] as positional arguments, put the '
'flags at the end.').format(
flag=option_string,
values=','.join(suggestions),
extras=', '.join(extras))
# TODO(user): stop warning when we're ready
warn_only = True
if not warn_only:
raise argparse.ArgumentError(ArgShell(option_string), msg)
else:
log.warn(msg)
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, aggregate_value, option_string=option_string)
else:
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, values[0], option_string=option_string)
return FloatingListValuesCatcherAction
|
|
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import lookup
import shutil, unittest, os
from util import result_lines
from test import TemplateTest, template_base, module_base
from test import eq_
try:
import beaker
except ImportError:
from nose import SkipTest
raise SkipTest("Beaker is required for these tests.")
from mako.cache import register_plugin, CacheImpl
class MockCacheImpl(CacheImpl):
def __init__(self, cache):
self.cache = cache
self.realcacheimpl = cache._load_impl("beaker")
def get_and_replace(self, key, creation_function, **kw):
self.key = key
self.kwargs = kw.copy()
return self.realcacheimpl.get_and_replace(key, creation_function, **kw)
def put(self, key, value, **kw):
self.key = key
self.kwargs = kw.copy()
self.realcacheimpl.put(key, value, **kw)
def get(self, key, **kw):
self.key = key
self.kwargs = kw.copy()
return self.realcacheimpl.get(key, **kw)
def invalidate(self, key, **kw):
self.key = key
self.kwargs = kw.copy()
self.realcacheimpl.invalidate(key, **kw)
register_plugin("mock", __name__, "MockCacheImpl")
class CacheTest(TemplateTest):
def _install_mock_cache(self, template):
template.cache_impl = 'mock'
return template.cache.impl
def test_def(self):
t = Template("""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""")
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
assert m.kwargs == {}
def test_cache_enable(self):
t = Template("""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
<% callcount[0] += 1 %>
</%def>
${foo()}
${foo()}
callcount: ${callcount}
""", cache_enabled=False)
m = self._install_mock_cache(t)
eq_(t.render().strip(), "callcount: [2]")
def test_nested_def(self):
t = Template("""
<%!
callcount = [0]
%>
<%def name="foo()">
<%def name="bar()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${bar()}
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""")
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
assert m.kwargs == {}
def test_page(self):
t = Template("""
<%!
callcount = [0]
%>
<%page cached="True"/>
this is foo
<%
callcount[0] += 1
%>
callcount: ${callcount}
""")
m = self._install_mock_cache(t)
t.render()
t.render()
assert result_lines(t.render()) == [
"this is foo",
"callcount: [1]"
]
assert m.kwargs == {}
def test_dynamic_key_with_funcargs(self):
t = Template("""
<%def name="foo(num=5)" cached="True" cache_key="foo_${str(num)}">
hi
</%def>
${foo()}
""")
m = self._install_mock_cache(t)
t.render()
t.render()
assert result_lines(t.render()) == ['hi']
assert m.key == "foo_5"
t = Template("""
<%def name="foo(*args, **kwargs)" cached="True" cache_key="foo_${kwargs['bar']}">
hi
</%def>
${foo(1, 2, bar='lala')}
""")
m = self._install_mock_cache(t)
t.render()
assert result_lines(t.render()) == ['hi']
assert m.key == "foo_lala"
t = Template('''
<%page args="bar='hi'" cache_key="foo_${bar}" cached="True"/>
hi
''')
m = self._install_mock_cache(t)
t.render()
assert result_lines(t.render()) == ['hi']
assert m.key == "foo_hi"
def test_dynamic_key_with_imports(self):
lookup = TemplateLookup()
lookup.put_string("foo.html", """
<%!
callcount = [0]
%>
<%namespace file="ns.html" import="*"/>
<%page cached="True" cache_key="${foo}"/>
this is foo
<%
callcount[0] += 1
%>
callcount: ${callcount}
""")
lookup.put_string("ns.html", """""")
t = lookup.get_template("foo.html")
m = self._install_mock_cache(t)
t.render(foo='somekey')
t.render(foo='somekey')
assert result_lines(t.render(foo='somekey')) == [
"this is foo",
"callcount: [1]"
]
assert m.kwargs == {}
def test_fileargs_implicit(self):
l = lookup.TemplateLookup(module_directory=module_base)
l.put_string("test","""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True" cache_type='dbm'>
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""")
m = self._install_mock_cache(l.get_template('test'))
assert result_lines(l.get_template('test').render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
eq_(m.kwargs, {'type':'dbm'})
def test_fileargs_deftag(self):
t = Template("""
<%%!
callcount = [0]
%%>
<%%def name="foo()" cached="True" cache_type='file' cache_dir='%s'>
this is foo
<%%
callcount[0] += 1
%%>
</%%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""" % module_base)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
assert m.kwargs == {'type':'file','dir':module_base}
def test_fileargs_pagetag(self):
t = Template("""
<%%page cache_dir='%s' cache_type='dbm'/>
<%%!
callcount = [0]
%%>
<%%def name="foo()" cached="True">
this is foo
<%%
callcount[0] += 1
%%>
</%%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""" % module_base)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
eq_(m.kwargs, {'dir':module_base, 'type':'dbm'})
def test_args_complete(self):
t = Template("""
<%%def name="foo()" cached="True" cache_timeout="30" cache_dir="%s" cache_type="file" cache_key='somekey'>
this is foo
</%%def>
${foo()}
""" % module_base)
m = self._install_mock_cache(t)
t.render()
eq_(m.kwargs, {'dir':module_base, 'type':'file', 'timeout':30})
t2 = Template("""
<%%page cached="True" cache_timeout="30" cache_dir="%s" cache_type="file" cache_key='somekey'/>
hi
""" % module_base)
m = self._install_mock_cache(t2)
t2.render()
eq_(m.kwargs, {'dir':module_base, 'type':'file', 'timeout':30})
def test_fileargs_lookup(self):
l = lookup.TemplateLookup(cache_dir=module_base, cache_type='file')
l.put_string("test","""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""")
t = l.get_template('test')
m = self._install_mock_cache(t)
assert result_lines(l.get_template('test').render()) == [
'this is foo',
'this is foo',
'this is foo',
'callcount: [1]',
]
eq_(m.kwargs, {'dir':module_base, 'type':'file'})
def test_buffered(self):
t = Template("""
<%!
def a(text):
return "this is a " + text.strip()
%>
${foo()}
${foo()}
<%def name="foo()" cached="True" buffered="True">
this is a test
</%def>
""", buffer_filters=["a"])
assert result_lines(t.render()) == ["this is a this is a test", "this is a this is a test"]
def test_load_from_expired(self):
"""test that the cache callable can be called safely after the
originating template has completed rendering.
"""
t = Template("""
${foo()}
<%def name="foo()" cached="True" cache_timeout="2">
foo
</%def>
""")
import time
x1 = t.render()
time.sleep(3)
x2 = t.render()
assert x1.strip() == x2.strip() == "foo"
def test_cache_uses_current_context(self):
t = Template("""
${foo()}
<%def name="foo()" cached="True" cache_timeout="2">
foo: ${x}
</%def>
""")
import time
x1 = t.render(x=1)
time.sleep(3)
x2 = t.render(x=2)
eq_(x1.strip(), "foo: 1")
eq_(x2.strip(), "foo: 2")
def test_namespace_access(self):
t = Template("""
<%def name="foo(x)" cached="True">
foo: ${x}
</%def>
<%
foo(1)
foo(2)
local.cache.invalidate_def('foo')
foo(3)
foo(4)
%>
""")
assert result_lines(t.render()) == ['foo: 1', 'foo: 1', 'foo: 3', 'foo: 3']
def test_invalidate(self):
t = Template("""
<%%def name="foo()" cached="True">
foo: ${x}
</%%def>
<%%def name="bar()" cached="True" cache_type='dbm' cache_dir='%s'>
bar: ${x}
</%%def>
${foo()} ${bar()}
""" % module_base)
assert result_lines(t.render(x=1)) == ["foo: 1", "bar: 1"]
assert result_lines(t.render(x=2)) == ["foo: 1", "bar: 1"]
t.cache.invalidate_def('foo')
assert result_lines(t.render(x=3)) == ["foo: 3", "bar: 1"]
t.cache.invalidate_def('bar')
assert result_lines(t.render(x=4)) == ["foo: 3", "bar: 4"]
t = Template("""
<%%page cached="True" cache_type="dbm" cache_dir="%s"/>
page: ${x}
""" % module_base)
assert result_lines(t.render(x=1)) == ["page: 1"]
assert result_lines(t.render(x=2)) == ["page: 1"]
t.cache.invalidate_body()
assert result_lines(t.render(x=3)) == ["page: 3"]
assert result_lines(t.render(x=4)) == ["page: 3"]
def test_custom_args_def(self):
t = Template("""
<%def name="foo()" cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob">
</%def>
${foo()}
""")
m = self._install_mock_cache(t)
t.render()
eq_(m.kwargs, {'region':'myregion', 'timeout':50, 'foo':'foob'})
def test_custom_args_block(self):
t = Template("""
<%block name="foo" cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob">
</%block>
""")
m = self._install_mock_cache(t)
t.render()
eq_(m.kwargs, {'region':'myregion', 'timeout':50, 'foo':'foob'})
def test_custom_args_page(self):
t = Template("""
<%page cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob"/>
""")
m = self._install_mock_cache(t)
t.render()
eq_(m.kwargs, {'region':'myregion', 'timeout':50, 'foo':'foob'})
|
|
# Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""BibTeX-like name formatting.
>>> name = 'Charles Louis Xavier Joseph de la Vallee Poussin'
>>> print format_name(name, '{vv~}{ll}{, jj}{, f.}')
de~la Vallee~Poussin, C.~L. X.~J.
>>> name = 'abc'
>>> print format_name(name, '{vv~}{ll}{, jj}{, f.}')
abc
>>> name = 'Jean-Pierre Hansen'
>>> print format_name(name, '{ff~}{vv~}{ll}{, jj}')
Jean-Pierre Hansen
>>> print format_name(name, '{f.~}{vv~}{ll}{, jj}')
J.-P. Hansen
>>> name = 'F. Phidias Phony-Baloney'
>>> print format_name(name, '{v{}}{l}')
P.-B
>>> print format_name(name, '{v{}}{l.}')
P.-B.
>>> print format_name(name, '{v{}}{l{}}')
PB
"""
import re
from pybtex.database import Person
from pybtex.utils import deprecated
from pybtex.bibtex.utils import bibtex_len, bibtex_abbreviate
from pybtex.scanner import (
Scanner, Pattern, Literal,
PybtexSyntaxError, PrematureEOF
)
class BibTeXNameFormatError(Exception):
pass
class Text(object):
def __init__(self, text):
self.text = text
def __repr__(self):
return u'{0}({1})'.format(type(self).__name__, repr(self.text))
def __eq__(self, other):
return type(self) == type(other) and self.text == other.text
def format(self, person):
return self.text
def to_python(self):
return repr(self.text)
class NamePart(object):
def __init__(self, format_list):
pre_text, format_chars, self.delimiter, post_text = format_list
if not format_chars and pre_text and not post_text:
post_text = pre_text
pre_text = ''
if post_text.endswith('~~'):
self.tie = '~~'
elif post_text.endswith('~'):
self.tie = '~'
else:
self.tie = None
self.pre_text = pre_text
self.post_text = post_text.rstrip('~')
if not format_chars:
self.format_char = ''
self.abbreviate = False
else:
l = len(format_chars)
if l == 1:
self.abbreviate = True
elif l == 2 and format_chars[0] == format_chars[1]:
self.abbreviate = False
else:
raise BibTeXNameFormatError('invalid format string')
self.format_char = format_chars[0]
def __repr__(self):
format_chars = self.format_char * (1 if self.abbreviate else 2)
format_list = [self.pre_text, format_chars, self.delimiter, self.post_text]
return u'{0}({1})'.format(type(self).__name__, repr(format_list))
def __eq__(self, other):
return (
type(self) == type(other)
and self.pre_text == other.pre_text
and self.format_char == other.format_char
and self.abbreviate == other.abbreviate
and self.delimiter == other.delimiter
and self.post_text == other.post_text
)
types = {
'f': 'bibtex_first',
'l': 'last',
'v': 'prelast',
'j': 'lineage'
}
def format(self, person):
names = getattr(person, self.types[self.format_char])() if self.format_char else []
if self.format_char and not names:
return ''
if self.abbreviate:
names = [bibtex_abbreviate(name, self.delimiter) for name in names]
if self.delimiter is None:
if self.abbreviate:
names = join(names, '.~', '. ')
else:
names = join(names)
else:
names = self.delimiter.join(names)
formatted_part = self.pre_text + names + self.post_text
if self.tie == '~':
discretionary = tie_or_space(formatted_part)
elif self.tie == '~~':
discretionary = '~'
else:
discretionary = ''
return formatted_part + discretionary
def to_python(self):
from pybtex.style.names import name_part
class NamePart(object):
def __init__(self, part, abbr=False):
self.part = part
self.abbr = abbr
def __repr__(self):
abbr = 'abbr' if self.abbr else ''
return 'person.%s(%s)' % (self.part, abbr)
kwargs = {}
if self.pre_text:
kwargs['before'] = self.pre_text
if self.tie:
kwargs['tie'] = True
return repr(name_part(**kwargs) [
NamePart(self.types[self.format_char], self.abbreviate)
])
class NameFormat(object):
"""
BibTeX name format string.
>>> f = NameFormat('{ff~}{vv~}{ll}{, jj}')
>>> f.parts == [
... NamePart(['', 'ff', None, '']),
... NamePart(['', 'vv', None, '']),
... NamePart(['', 'll', None, '']),
... NamePart([', ', 'jj', None, ''])
... ]
True
>>> f = NameFormat('{{ }ff~{ }}{vv~{- Test text here -}~}{ll}{, jj}')
>>> f.parts == [
... NamePart(['{ }', 'ff', None, '~{ }']),
... NamePart(['', 'vv', None, '~{- Test text here -}']),
... NamePart(['', 'll', None, '']),
... NamePart([u', ', 'jj', None, ''])
... ]
True
>>> f = NameFormat('abc def {f~} xyz {f}?')
>>> f.parts == [
... Text('abc def '),
... NamePart(['', 'f', None, '']),
... Text(' xyz '),
... NamePart(['', 'f', None, '']),
... Text('?'),
... ]
True
>>> f = NameFormat('{{abc}{def}ff~{xyz}{#@$}}')
>>> f.parts == [NamePart(['{abc}{def}', 'ff', None, '~{xyz}{#@$}'])]
True
>>> f = NameFormat('{{abc}{def}ff{xyz}{#@${}{sdf}}}')
>>> f.parts == [NamePart(['{abc}{def}', 'ff', 'xyz', '{#@${}{sdf}}'])]
True
>>> f = NameFormat('{f.~}')
>>> f.parts == [NamePart(['', 'f', None, '.'])]
True
>>> f = NameFormat('{f~.}')
>>> f.parts == [NamePart(['', 'f', None, '~.'])]
True
>>> f = NameFormat('{f{.}~}')
>>> f.parts == [NamePart(['', 'f', '.', ''])]
True
"""
def __init__(self, format):
self.format_string = format
self.parts = list(NameFormatParser(format).parse())
def format(self, name):
person = Person(name)
return ''.join(part.format(person) for part in self.parts)
def to_python(self):
"""Convert BibTeX name format to Python (inexactly)."""
from pybtex.style.names import join
parts = ',\n'.join(' ' * 8 + part.to_python() for part in self.parts)
comment = ' ' * 4 + (
'"""Format names similarly to %s in BibTeX."""' % self.format_string
)
body = ' ' * 4 + 'return join [\n%s,\n]' % parts
return '\n'.join([
'def format_names(person, abbr=False):',
comment,
body,
])
enough_chars = 3
def tie_or_space(word, tie='~', space = ' '):
if bibtex_len(word) < enough_chars:
return tie
else:
return space
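# For illustration (not part of the original module): with enough_chars = 3,
# short words get a tie and longer words get a plain space, e.g.
#   tie_or_space('a')    -> '~'
#   tie_or_space('road') -> ' '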
def join(words, tie='~', space=' '):
"""Join some words, inserting ties (~) when nessessary.
Ties are inserted:
- after the first word, if it is short
- before the last word
Otherwise space is inserted.
Should produce the same output as BibTeX.
>>> print join(['a', 'long', 'long', 'road'])
a~long long~road
>>> print join(['very', 'long', 'phrase'])
very long~phrase
"""
if len(words) <= 2:
return tie.join(words)
else:
return (words[0] + tie_or_space(words[0], tie, space) +
space.join(words[1:-1]) +
tie + words[-1])
def format_name(name, format):
return NameFormat(format).format(name)
@deprecated('0.16', 'use format_name() instead')
def format(name, format):
return format_name(name, format)
class UnbalancedBraceError(PybtexSyntaxError):
def __init__(self, parser):
message = u'name format string "{0}" has unbalanced braces'.format(parser.text)
super(UnbalancedBraceError, self).__init__(message, parser)
class NameFormatParser(Scanner):
LBRACE = Literal(u'{')
RBRACE = Literal(u'}')
TEXT = Pattern(ur'[^{}]+', 'text')
NON_LETTERS = Pattern(ur'[^{}\w]|\d+', 'non-letter characters', flags=re.IGNORECASE | re.UNICODE)
FORMAT_CHARS = Pattern(ur'[^\W\d_]+', 'format chars', flags=re.IGNORECASE | re.UNICODE)
lineno = None
def parse(self):
while True:
try:
result = self.parse_toplevel()
yield result
except EOFError:
break
def parse_toplevel(self):
token = self.required([self.TEXT, self.LBRACE, self.RBRACE], allow_eof=True)
if token.pattern is self.TEXT:
return Text(token.value)
elif token.pattern is self.LBRACE:
return NamePart(self.parse_name_part())
elif token.pattern is self.RBRACE:
raise UnbalancedBraceError(self)
def parse_braced_string(self):
while True:
try:
token = self.required([self.TEXT, self.RBRACE, self.LBRACE])
except PrematureEOF:
raise UnbalancedBraceError(self)
if token.pattern is self.TEXT:
yield token.value
elif token.pattern is self.RBRACE:
break
elif token.pattern is self.LBRACE:
yield u'{{{0}}}'.format(''.join(self.parse_braced_string()))
else:
raise ValueError(token)
def parse_name_part(self):
verbatim_prefix = []
format_chars = None
verbatim_postfix = []
verbatim = verbatim_prefix
delimiter = None
def check_format_chars(value):
value = value.lower()
if (
format_chars is not None
or len(value) not in [1, 2]
or value[0] != value[-1]
or value[0] not in 'flvj'
):
raise PybtexSyntaxError(u'name format string "{0}" has illegal brace-level-1 letters: {1}'.format(self.text, token.value), self)
while True:
try:
token = self.required([self.LBRACE, self.NON_LETTERS, self.FORMAT_CHARS, self.RBRACE])
except PrematureEOF:
raise UnbalancedBraceError(self)
if token.pattern is self.LBRACE:
verbatim.append(u'{{{0}}}'.format(''.join(self.parse_braced_string())))
elif token.pattern is self.FORMAT_CHARS:
check_format_chars(token.value)
format_chars = token.value
verbatim = verbatim_postfix
if self.optional([self.LBRACE]):
delimiter = ''.join(self.parse_braced_string())
elif token.pattern is self.NON_LETTERS:
verbatim.append(token.value)
elif token.pattern is self.RBRACE:
return ''.join(verbatim_prefix), format_chars, delimiter, ''.join(verbatim_postfix)
else:
raise ValueError(token)
def eat_whitespace(self):
pass
|
|
from cloudify.decorators import workflow
from cloudify.workflows import ctx
from cloudify.workflows import tasks as workflow_tasks
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
from utils import _get_all_nodes
from utils import _get_all_nodes_instances
from utils import _get_all_modified_node_instances
from utils import is_host_node
from workflow import WfStartEvent
from workflow import build_pre_event
# subworkflow 'install' for host 'Compute3'
def install_host_compute3(ctx, graph, custom_context):
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
set_state_task(ctx, graph, 'LinuxFileSystem1', 'started', 'LinuxFileSystem1_started', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'starting', 'LinuxFileSystem1_starting', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'created', 'LinuxFileSystem1_created', custom_context)
custom_context.register_native_delegate_wf_step('Compute3', 'Compute3_install')
set_state_task(ctx, graph, 'LinuxFileSystem1', 'configured', 'LinuxFileSystem1_configured', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'creating', 'LinuxFileSystem1_creating', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'configuring', 'LinuxFileSystem1_configuring', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'initial', 'LinuxFileSystem1_initial', custom_context)
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.start', 'start_LinuxFileSystem1', custom_context)
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.configure', 'configure_LinuxFileSystem1', custom_context)
custom_context.register_native_delegate_wf_step('Volume1', 'Volume1_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'start_LinuxFileSystem1', 'LinuxFileSystem1_starting', custom_context)
link_tasks(graph, 'LinuxFileSystem1_configuring', 'LinuxFileSystem1_created', custom_context)
link_tasks(graph, 'LinuxFileSystem1_initial', 'Compute3_install', custom_context)
link_tasks(graph, 'LinuxFileSystem1_starting', 'LinuxFileSystem1_configured', custom_context)
link_tasks(graph, 'LinuxFileSystem1_created', 'LinuxFileSystem1_creating', custom_context)
link_tasks(graph, 'configure_LinuxFileSystem1', 'LinuxFileSystem1_configuring', custom_context)
link_tasks(graph, 'LinuxFileSystem1_creating', 'LinuxFileSystem1_initial', custom_context)
link_tasks(graph, 'LinuxFileSystem1_started', 'start_LinuxFileSystem1', custom_context)
link_tasks(graph, 'LinuxFileSystem1_configured', 'configure_LinuxFileSystem1', custom_context)
link_tasks(graph, 'LinuxFileSystem1_initial', 'Volume1_install', custom_context)
# subworkflow 'install' for host 'NonScaledCompute'
def install_host_nonscaledcompute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('NonScaledCompute', 'NonScaledCompute_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
# subworkflow 'install' for host 'AnotherScaleCompute'
def install_host_anotherscalecompute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('AnotherScaleCompute', 'AnotherScaleCompute_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
# subworkflow 'install' for host 'Compute'
def install_host_compute(ctx, graph, custom_context):
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
set_state_task(ctx, graph, 'FileSystem', 'starting', 'FileSystem_starting', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'configuring', 'FileSystem_configuring', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'created', 'FileSystem_created', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'creating', 'FileSystem_creating', custom_context)
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
set_state_task(ctx, graph, 'FileSystem', 'started', 'FileSystem_started', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'initial', 'FileSystem_initial', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.create', 'create_FileSystem', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'configured', 'FileSystem_configured', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.configure', 'configure_FileSystem', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.start', 'start_FileSystem', custom_context)
custom_context.register_native_delegate_wf_step('BlockStorage', 'BlockStorage_install')
custom_context.register_native_delegate_wf_step('BlockStorage2', 'BlockStorage2_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'start_FileSystem', 'FileSystem_starting', custom_context)
link_tasks(graph, 'configure_FileSystem', 'FileSystem_configuring', custom_context)
link_tasks(graph, 'FileSystem_configuring', 'FileSystem_created', custom_context)
link_tasks(graph, 'create_FileSystem', 'FileSystem_creating', custom_context)
link_tasks(graph, 'FileSystem_initial', 'Compute_install', custom_context)
link_tasks(graph, 'FileSystem_creating', 'FileSystem_initial', custom_context)
link_tasks(graph, 'FileSystem_created', 'create_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_starting', 'FileSystem_configured', custom_context)
link_tasks(graph, 'FileSystem_configured', 'configure_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_started', 'start_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_initial', 'BlockStorage_install', custom_context)
link_tasks(graph, 'FileSystem_initial', 'BlockStorage2_install', custom_context)
# subworkflow 'uninstall' for host 'Compute3'
def uninstall_host_compute3(ctx, graph, custom_context):
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.stop', 'stop_LinuxFileSystem1', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'stopping', 'LinuxFileSystem1_stopping', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'deleted', 'LinuxFileSystem1_deleted', custom_context)
custom_context.register_native_delegate_wf_step('Compute3', 'Compute3_uninstall')
set_state_task(ctx, graph, 'LinuxFileSystem1', 'deleting', 'LinuxFileSystem1_deleting', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'stopped', 'LinuxFileSystem1_stopped', custom_context)
custom_context.register_native_delegate_wf_step('Volume1', 'Volume1_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
link_tasks(graph, 'LinuxFileSystem1_stopped', 'stop_LinuxFileSystem1', custom_context)
link_tasks(graph, 'stop_LinuxFileSystem1', 'LinuxFileSystem1_stopping', custom_context)
link_tasks(graph, 'Volume1_uninstall', 'LinuxFileSystem1_deleted', custom_context)
link_tasks(graph, 'Compute3_uninstall', 'LinuxFileSystem1_deleted', custom_context)
link_tasks(graph, 'LinuxFileSystem1_deleted', 'LinuxFileSystem1_deleting', custom_context)
link_tasks(graph, 'LinuxFileSystem1_deleting', 'LinuxFileSystem1_stopped', custom_context)
# subworkflow 'uninstall' for host 'NonScaledCompute'
def uninstall_host_nonscaledcompute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('NonScaledCompute', 'NonScaledCompute_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
# subworkflow 'uninstall' for host 'AnotherScaleCompute'
def uninstall_host_anotherscalecompute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('AnotherScaleCompute', 'AnotherScaleCompute_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
# subworkflow 'uninstall' for host 'Compute'
def uninstall_host_compute(ctx, graph, custom_context):
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
set_state_task(ctx, graph, 'FileSystem', 'deleting', 'FileSystem_deleting', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'deleted', 'FileSystem_deleted', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'stopped', 'FileSystem_stopped', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'stopping', 'FileSystem_stopping', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.delete', 'delete_FileSystem', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.stop', 'stop_FileSystem', custom_context)
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
custom_context.register_native_delegate_wf_step('BlockStorage', 'BlockStorage_uninstall')
custom_context.register_native_delegate_wf_step('BlockStorage2', 'BlockStorage2_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
link_tasks(graph, 'delete_FileSystem', 'FileSystem_deleting', custom_context)
link_tasks(graph, 'BlockStorage_uninstall', 'FileSystem_deleted', custom_context)
link_tasks(graph, 'Compute_uninstall', 'FileSystem_deleted', custom_context)
link_tasks(graph, 'BlockStorage2_uninstall', 'FileSystem_deleted', custom_context)
link_tasks(graph, 'FileSystem_deleting', 'FileSystem_stopped', custom_context)
link_tasks(graph, 'stop_FileSystem', 'FileSystem_stopping', custom_context)
link_tasks(graph, 'FileSystem_deleted', 'delete_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_stopped', 'stop_FileSystem', custom_context)
def install_host(ctx, graph, custom_context, compute):
options = {}
options['Compute3'] = install_host_compute3
options['NonScaledCompute'] = install_host_nonscaledcompute
options['AnotherScaleCompute'] = install_host_anotherscalecompute
options['Compute'] = install_host_compute
options[compute](ctx, graph, custom_context)
def uninstall_host(ctx, graph, custom_context, compute):
options = {}
options['Compute3'] = uninstall_host_compute3
options['NonScaledCompute'] = uninstall_host_nonscaledcompute
options['AnotherScaleCompute'] = uninstall_host_anotherscalecompute
options['Compute'] = uninstall_host_compute
options[compute](ctx, graph, custom_context)
@workflow
def a4c_install(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('install')))
_a4c_install(ctx, graph, custom_context)
return graph.execute()
@workflow
def a4c_uninstall(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('uninstall')))
_a4c_uninstall(ctx, graph, custom_context)
return graph.execute()
def _a4c_install(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.register_native_delegate_wf_step('BlockStorage', 'BlockStorage_install')
set_state_task(ctx, graph, 'FileSystem', 'starting', 'FileSystem_starting', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'configuring', 'FileSystem_configuring', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'started', 'LinuxFileSystem1_started', custom_context)
custom_context.register_native_delegate_wf_step('Volume1', 'Volume1_install')
set_state_task(ctx, graph, 'FileSystem', 'created', 'FileSystem_created', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'creating', 'FileSystem_creating', custom_context)
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
set_state_task(ctx, graph, 'FileSystem', 'started', 'FileSystem_started', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'starting', 'LinuxFileSystem1_starting', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'created', 'LinuxFileSystem1_created', custom_context)
custom_context.register_native_delegate_wf_step('Compute3', 'Compute3_install')
set_state_task(ctx, graph, 'FileSystem', 'initial', 'FileSystem_initial', custom_context)
custom_context.register_native_delegate_wf_step('BlockStorage2', 'BlockStorage2_install')
custom_context.register_native_delegate_wf_step('NonScaledCompute', 'NonScaledCompute_install')
set_state_task(ctx, graph, 'LinuxFileSystem1', 'configured', 'LinuxFileSystem1_configured', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.create', 'create_FileSystem', custom_context)
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_install')
set_state_task(ctx, graph, 'FileSystem', 'configured', 'FileSystem_configured', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'creating', 'LinuxFileSystem1_creating', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.configure', 'configure_FileSystem', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.start', 'start_FileSystem', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'configuring', 'LinuxFileSystem1_configuring', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'initial', 'LinuxFileSystem1_initial', custom_context)
custom_context.register_native_delegate_wf_step('AnotherScaleCompute', 'AnotherScaleCompute_install')
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.start', 'start_LinuxFileSystem1', custom_context)
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.configure', 'configure_LinuxFileSystem1', custom_context)
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'FileSystem_starting', 'FileSystem_configured', custom_context)
link_tasks(graph, 'FileSystem_configuring', 'FileSystem_created', custom_context)
link_tasks(graph, 'LinuxFileSystem1_started', 'start_LinuxFileSystem1', custom_context)
link_tasks(graph, 'FileSystem_created', 'create_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_creating', 'FileSystem_initial', custom_context)
link_tasks(graph, 'FileSystem_started', 'start_FileSystem', custom_context)
link_tasks(graph, 'LinuxFileSystem1_starting', 'LinuxFileSystem1_configured', custom_context)
link_tasks(graph, 'LinuxFileSystem1_created', 'LinuxFileSystem1_creating', custom_context)
link_tasks(graph, 'FileSystem_initial', 'BlockStorage_install', custom_context)
link_tasks(graph, 'FileSystem_initial', 'BlockStorage2_install', custom_context)
link_tasks(graph, 'FileSystem_initial', 'Compute_install', custom_context)
link_tasks(graph, 'LinuxFileSystem1_configured', 'configure_LinuxFileSystem1', custom_context)
link_tasks(graph, 'create_FileSystem', 'FileSystem_creating', custom_context)
link_tasks(graph, 'FileSystem_configured', 'configure_FileSystem', custom_context)
link_tasks(graph, 'LinuxFileSystem1_creating', 'LinuxFileSystem1_initial', custom_context)
link_tasks(graph, 'configure_FileSystem', 'FileSystem_configuring', custom_context)
link_tasks(graph, 'start_FileSystem', 'FileSystem_starting', custom_context)
link_tasks(graph, 'LinuxFileSystem1_configuring', 'LinuxFileSystem1_created', custom_context)
link_tasks(graph, 'LinuxFileSystem1_initial', 'Compute3_install', custom_context)
link_tasks(graph, 'LinuxFileSystem1_initial', 'Volume1_install', custom_context)
link_tasks(graph, 'start_LinuxFileSystem1', 'LinuxFileSystem1_starting', custom_context)
link_tasks(graph, 'configure_LinuxFileSystem1', 'LinuxFileSystem1_configuring', custom_context)
def _a4c_uninstall(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('FileSystem')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
custom_context.add_customized_wf_node('LinuxFileSystem1')
set_state_task(ctx, graph, 'FileSystem', 'deleting', 'FileSystem_deleting', custom_context)
custom_context.register_native_delegate_wf_step('Volume1', 'Volume1_uninstall')
operation_task(ctx, graph, 'LinuxFileSystem1', 'cloudify.interfaces.lifecycle.stop', 'stop_LinuxFileSystem1', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'deleted', 'FileSystem_deleted', custom_context)
custom_context.register_native_delegate_wf_step('NonScaledCompute', 'NonScaledCompute_uninstall')
set_state_task(ctx, graph, 'FileSystem', 'stopped', 'FileSystem_stopped', custom_context)
set_state_task(ctx, graph, 'FileSystem', 'stopping', 'FileSystem_stopping', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.delete', 'delete_FileSystem', custom_context)
operation_task(ctx, graph, 'FileSystem', 'cloudify.interfaces.lifecycle.stop', 'stop_FileSystem', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'stopping', 'LinuxFileSystem1_stopping', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'deleted', 'LinuxFileSystem1_deleted', custom_context)
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_uninstall')
custom_context.register_native_delegate_wf_step('Compute3', 'Compute3_uninstall')
custom_context.register_native_delegate_wf_step('AnotherScaleCompute', 'AnotherScaleCompute_uninstall')
custom_context.register_native_delegate_wf_step('BlockStorage', 'BlockStorage_uninstall')
set_state_task(ctx, graph, 'LinuxFileSystem1', 'deleting', 'LinuxFileSystem1_deleting', custom_context)
set_state_task(ctx, graph, 'LinuxFileSystem1', 'stopped', 'LinuxFileSystem1_stopped', custom_context)
custom_context.register_native_delegate_wf_step('BlockStorage2', 'BlockStorage2_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
link_tasks(graph, 'FileSystem_deleting', 'FileSystem_stopped', custom_context)
link_tasks(graph, 'Volume1_uninstall', 'LinuxFileSystem1_deleted', custom_context)
link_tasks(graph, 'stop_LinuxFileSystem1', 'LinuxFileSystem1_stopping', custom_context)
link_tasks(graph, 'FileSystem_deleted', 'delete_FileSystem', custom_context)
link_tasks(graph, 'FileSystem_stopped', 'stop_FileSystem', custom_context)
link_tasks(graph, 'delete_FileSystem', 'FileSystem_deleting', custom_context)
link_tasks(graph, 'stop_FileSystem', 'FileSystem_stopping', custom_context)
link_tasks(graph, 'LinuxFileSystem1_deleted', 'LinuxFileSystem1_deleting', custom_context)
link_tasks(graph, 'Compute_uninstall', 'FileSystem_deleted', custom_context)
link_tasks(graph, 'Compute3_uninstall', 'LinuxFileSystem1_deleted', custom_context)
link_tasks(graph, 'BlockStorage_uninstall', 'FileSystem_deleted', custom_context)
link_tasks(graph, 'LinuxFileSystem1_deleting', 'LinuxFileSystem1_stopped', custom_context)
link_tasks(graph, 'LinuxFileSystem1_stopped', 'stop_LinuxFileSystem1', custom_context)
link_tasks(graph, 'BlockStorage2_uninstall', 'FileSystem_deleted', custom_context)
def _get_scaling_group_name_from_node_id(ctx, node_id):
scaling_groups=ctx.deployment.scaling_groups
for group_name, scaling_group in ctx.deployment.scaling_groups.iteritems():
for member in scaling_group['members']:
if member == node_id:
ctx.logger.info("Node {} found in scaling group {}".format(node_id, group_name))
return group_name
return None
@workflow
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
delta = int(delta)
scalable_entity_name = _get_scaling_group_name_from_node_id(ctx, node_id)
scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
if scalable_entity_name:
curr_num_instances = scaling_group['properties']['current_instances']
planned_num_instances = curr_num_instances + delta
scale_id = scalable_entity_name
else:
scaled_node = ctx.get_node(scalable_entity_name)
if not scaled_node:
raise ValueError("Node {0} doesn't exist".format(scalable_entity_name))
if not is_host_node(scaled_node):
raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(scalable_entity_name))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
scale_id = scaled_node.id
if planned_num_instances < 1:
raise ValueError('Provided delta: {0} is illegal. current number of'
'instances of node/group {1} is {2}'
.format(delta, scalable_entity_name, curr_num_instances))
modification = ctx.deployment.start_modification({
scale_id: {
'instances': planned_num_instances
}
})
ctx.logger.info('Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
try:
if delta > 0:
ctx.logger.info('Scaling host/group {0} adding {1} instances'.format(scalable_entity_name, delta))
added_and_related = _get_all_nodes(modification.added)
added = _get_all_modified_node_instances(added_and_related, 'added')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'install')))
custom_context = CustomContext(ctx, added, added_and_related)
install_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Scale failed. Uninstalling node/group {0}'.format(scalable_entity_name))
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
try:
custom_context = CustomContext(ctx, added, added_and_related)
uninstall_host(ctx, graph, custom_context, scalable_entity_name)
graph.execute()
except:
ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(scalable_entity_name))
raise
else:
ctx.logger.info('Unscaling host/group {0} removing {1} instances'.format(scalable_entity_name, delta))
removed_and_related = _get_all_nodes(modification.removed)
removed = _get_all_modified_node_instances(removed_and_related, 'removed')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'uninstall')))
custom_context = CustomContext(ctx, removed, removed_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Unscale failed.')
raise
except:
ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
try:
modification.rollback()
except:
ctx.logger.warn('Deployment modification rollback failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
raise
else:
try:
modification.finish()
except:
ctx.logger.warn('Deployment modification finish failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
@workflow
def a4c_heal(
ctx,
node_instance_id,
diagnose_value='Not provided',
**kwargs):
"""Reinstalls the whole subgraph of the system topology
The subgraph consists of all the nodes that are hosted in the
failing node's compute and the compute itself.
Additionally it unlinks and establishes appropriate relationships
:param ctx: cloudify context
:param node_id: failing node's id
:param diagnose_value: diagnosed reason of failure
"""
ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
.format(node_instance_id, diagnose_value))
failing_node = ctx.get_node_instance(node_instance_id)
host_instance_id = failing_node._node_instance.host_id
failing_node_host = ctx.get_node_instance(host_instance_id)
node_id = failing_node_host.node_id
subgraph_node_instances = failing_node_host.get_contained_subgraph()
added_and_related = _get_all_nodes(ctx)
try:
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'uninstall')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
graph.execute()
except:
ctx.logger.error('Uninstall while healing failed.')
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'install')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
install_host(ctx, graph, custom_context, node_id)
graph.execute()
#following code can be pasted in src/test/python/workflows/context.py for simulation
#def _build_nodes(ctx):
#types = []
#types.append('alien.cloudify.openstack.nodes.Volume')
#types.append('alien.cloudify.openstack.nodes.DeletableVolume')
#types.append('tosca.nodes.BlockStorage')
#types.append('tosca.nodes.Root')
#node_BlockStorage2 = _build_node(ctx, 'BlockStorage2', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Compute3 = _build_node(ctx, 'Compute3', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_NonScaledCompute = _build_node(ctx, 'NonScaledCompute', types, 1)
#types = []
#types.append('alien.cloudify.openstack.nodes.Volume')
#types.append('alien.cloudify.openstack.nodes.DeletableVolume')
#types.append('tosca.nodes.BlockStorage')
#types.append('tosca.nodes.Root')
#node_BlockStorage = _build_node(ctx, 'BlockStorage', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_AnotherScaleCompute = _build_node(ctx, 'AnotherScaleCompute', types, 1)
#types = []
#types.append('alien.cloudify.openstack.nodes.Volume')
#types.append('alien.cloudify.openstack.nodes.DeletableVolume')
#types.append('tosca.nodes.BlockStorage')
#types.append('tosca.nodes.Root')
#node_Volume1 = _build_node(ctx, 'Volume1', types, 1)
#types = []
#types.append('fastconnect.nodes.SoftwareTest4HSS')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_FileSystem = _build_node(ctx, 'FileSystem', types, 1)
#types = []
#types.append('alien.nodes.LinuxFileSystem')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_LinuxFileSystem1 = _build_node(ctx, 'LinuxFileSystem1', types, 1)
#types = []
#types.append('alien.nodes.openstack.PublicNetwork')
#types.append('alien.nodes.PublicNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_NetPub = _build_node(ctx, 'NetPub', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Compute = _build_node(ctx, 'Compute', types, 1)
#_add_relationship(node_BlockStorage2, node_Compute)
#_add_relationship(node_Compute3, node_NetPub)
#_add_relationship(node_NonScaledCompute, node_NetPub)
#_add_relationship(node_BlockStorage, node_Compute)
#_add_relationship(node_AnotherScaleCompute, node_NetPub)
#_add_relationship(node_Volume1, node_Compute3)
#_add_relationship(node_FileSystem, node_BlockStorage2)
#_add_relationship(node_FileSystem, node_Compute)
#_add_relationship(node_FileSystem, node_BlockStorage)
#_add_relationship(node_LinuxFileSystem1, node_Volume1)
#_add_relationship(node_LinuxFileSystem1, node_Compute3)
#_add_relationship(node_Compute, node_NetPub)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility to generate dcid for statistical variables."""
import copy
import re
import os
import sys
#pylint: disable=wrong-import-position
#pylint: disable=import-error
# Allows the following module imports to work when running as a script
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '.')) # For soc_codes_names
from soc_codes_names import SOC_MAP
#pylint: enable=wrong-import-position
#pylint: enable=import-error
# Global constants
# Regex to match the quantity notations - [value quantity], [quantity value]
# Example matches: [2 Person], [Person 2]
_QUANTITY_REGEX_1 = re.compile(
r'\[(?P<value>-|-?\d+(\.\d+)?) (?P<quantity>[A-Za-z_/\d]+)\]')
_QUANTITY_REGEX_2 = re.compile(
r'\[(?P<quantity>[A-Za-z_/\d]+) (?P<value>-|-?\d+(\.\d+)?)\]')
# Regex to match the quantity range notations -
# [lower_limit upper_limit quantity], [quantity lower_limit upper_limit]
# Example matches: [10000 14999 USDollar], [USDollar 10000 14999]
_QUANTITY_RANGE_REGEX_1 = re.compile(r'\[(?P<lower_limit>-|-?\d+(\.\d+)?) '
r'(?P<upper_limit>-|-?\d+(\.\d+)?) '
r'(?P<quantity>[A-Za-z_/\d]+)\]')
_QUANTITY_RANGE_REGEX_2 = re.compile(r'\[(?P<quantity>[A-Za-z_/\d]+) '
r'(?P<lower_limit>-|-?\d+(\.\d+)?) '
r'(?P<upper_limit>-|-?\d+(\.\d+)?)\]')
# These are the default properties ignored during dcid generation
_DEFAULT_IGNORE_PROPS = ('unit', 'Node', 'memberOf', 'typeOf',
'constraintProperties', 'name', 'description',
'descriptionUrl', 'label', 'url', 'alternateName',
'scalingFactor')
# Regex to match prefixes to be removed from constraints. The regex checks for
# specific prefixes followed by an upper case letter or underscore. This helps
# to avoid false positives like 'USCitizenBornInTheUnitedStates'.
_CONSTRAINT_PREFIX_REGEX = re.compile(
r'(?P<prefix>^(USC|CDC|DAD|BLS|NCES|ACSED|UCR))(?P<ucase_uscore>[A-Z_])')
# Multiple values can be assigned to a property by separating each value with
# '__' or '&'. To represent ParkOrPlayground for location of crime, we can have
# p=locationOfCrime and v=Park__Playground or v=Park&Playground.
# In the dcid, this will be represented as 'ParkOrPlayground'.
_MULTIPLE_VALUE_SEPARATOR_REGEX = re.compile(r'__|&')
# A mapping of NAICS codes to industry topics
# This map was generated using the code from the _create_naics_map function at
# https://github.com/datacommonsorg/tools/blob/master/stat_var_renaming/stat_var_renaming_constants.py
_NAICS_MAP = {
'00': 'Unclassified',
'11': 'AgricultureForestryFishingHunting',
'21': 'MiningQuarryingOilGasExtraction',
'22': 'Utilities',
'23': 'Construction',
'31': 'Manufacturing',
'32': 'Manufacturing',
'33': 'Manufacturing',
'42': 'WholesaleTrade',
'44': 'RetailTrade',
'45': 'RetailTrade',
'48': 'TransportationWarehousing',
'49': 'TransportationWarehousing',
'51': 'Information',
'52': 'FinanceInsurance',
'53': 'RealEstateRentalLeasing',
'54': 'ProfessionalScientificTechnicalServices',
'55': 'ManagementOfCompaniesEnterprises',
'56': 'AdministrativeSupportWasteManagementRemediationServices',
'61': 'EducationalServices',
'62': 'HealthCareSocialAssistance',
'71': 'ArtsEntertainmentRecreation',
'72': 'AccommodationFoodServices',
'81': 'OtherServices',
'92': 'PublicAdministration',
'99': 'Nonclassifiable',
'10': 'TotalAllIndustries',
'101': 'GoodsProducing',
'1011': 'NaturalResourcesMining',
'1012': 'Construction',
'1013': 'Manufacturing',
'102': 'ServiceProviding',
'1021': 'TradeTransportationUtilities',
'1022': 'Information',
'1023': 'FinancialActivities',
'1024': 'ProfessionalBusinessServices',
'1025': 'EducationHealthServices',
'1026': 'LeisureHospitality',
'1027': 'OtherServices',
'1028': 'PublicAdministration',
'1029': 'Unclassified'
}
# Regex to match NAICS Codes. These codes could be a single code or a range
# Example matches: 53-56, 44
_NAICS_CODE_REGEX = re.compile(r'(\d+-\d+|\d+)')
# Regex to extract the lower and upper ranges in a range of NAICS codes
# Example matches: 53-56, 11-21
_NAICS_RANGE_REGEX = re.compile(r'(?P<lower_limit>\d+)-(?P<upper_limit>\d+)')
# Certain properties have text prepended, appended or replaced in the dcid to
# improve readability. For example, p='householderRace', v='AsianAlone' is
# changed to v='HouseholderRaceAsianAlone'. The initial map was picked from
# https://github.com/datacommonsorg/tools/blob/master/stat_var_renaming/stat_var_renaming_functions.py
# In the map, the keys are properties and the value for each key is a dict
# which can have four keys. The 'prepend' and 'append' keys can be used to
# prepend and append to a property value. The value in 'replace' is replaced
# with the value in 'replacement'.
_PREPEND_APPEND_REPLACE_MAP = {
'languageSpokenAtHome': {
'append': 'SpokenAtHome'
},
'childSchoolEnrollment': {
'prepend': 'Child'
},
'residenceType': {
'prepend': 'ResidesIn'
},
'healthPrevented': {
'prepend': 'Received'
},
'householderAge': {
'prepend': 'HouseholderAge'
},
'householderRace': {
'prepend': 'HouseholderRace'
},
'dateBuilt': {
'append': 'Built'
},
'homeValue': {
'prepend': 'HomeValue'
},
'numberOfRooms': {
'prepend': 'WithTotal'
},
'isic': {
'prepend': 'ISIC'
},
'establishmentOwnership': {
'append': 'Establishment'
},
'householdSize': {
'prepend': 'With'
},
'householdWorkerSize': {
'prepend': 'With'
},
'numberOfVehicles': {
'prepend': 'With'
},
'income': {
'prepend': 'IncomeOf'
},
'grossRent': {
'prepend': 'GrossRent'
},
'healthOutcome': {
'prepend': 'With'
},
'healthPrevention': {
'prepend': 'Received'
},
'propertyTax': {
'prepend': 'YearlyTax'
},
'detailedLevelOfSchool': {
'prepend': 'Detailed'
},
'medicalCondition': {
'prepend': 'Condition'
},
'educationalAttainment': {
'prepend': 'EducationalAttainment'
},
'householderEducationalAttainment': {
'prepend': 'HouseholderEducationalAttainment'
},
'householderRelatedChildrenUnder18Years': {
'prepend': 'Householder',
'replace': 'Child',
'replacement': 'RelatedChildren'
},
'householderOwnChildrenUnder18Years': {
'prepend': 'Householder',
'replace': 'Child',
'replacement': 'OwnChildren'
},
'occupation': {
'append': 'Occupation'
},
'usualHoursWorked': {
'prepend': 'WorkPerWeek'
},
'workPeriod': {
'prepend': 'WorkPerYear'
},
'dateOfEntry': {
'prepend': 'DateOfEntry',
'replace': 'Date',
'replacement': ''
},
'placeOfBirth': {
'prepend': 'PlaceOfBirth'
},
'dateMovedIn': {
'prepend': 'MovedInDate',
'replace': 'Date',
'replacement': ''
},
'bachelorsDegreeMajor': {
'prepend': 'BachelorOf'
},
'biasMotivation': {
'prepend': 'BiasMotivation'
},
'offenderRace': {
'prepend': 'OffenderRace'
},
'offenderEthnicity': {
'prepend': 'OffenderEthnicity'
},
'locationOfCrime': {
'prepend': 'LocationOfCrime'
},
'victimType': {
'prepend': 'VictimType'
}
}
# This is a list of boolean properties
_BOOLEAN_PROPS = [
'hasComputer', 'hasFunctionalToilet', 'isAccessibleForFree',
'isEnergyStored', 'isFDAReferenceStandard', 'isFamilyFriendly',
'isGenomeRepresentationFull', 'isGift', 'isInternetUser',
'isLiquefiedNaturalGasStored', 'isLiveBroadcast', 'isNaturalGasStored',
'isPharmacodynamicRelationship', 'isPharmacokineticRelationship',
'isRefSeqGenBankAssembliesIdentical', 'isHateCrime'
]
# Maps stat vars which do not follow the conventions of stat var dcid naming.
# The key is the dcid generated by the get_statvar_dcid function. The value is
# the replacement dcid.
_LEGACY_MAP = {
'Count_Person_WithDisability_NoHealthInsurance':
'Count_Person_NoHealthInsurance_WithDisability',
'Count_Person_NoDisability_NoHealthInsurance':
'Count_Person_NoHealthInsurance_NoDisability'
}
def _capitalize_process(word: str) -> str:
"""Capitalizes, removes namespaces, measurement constraint prefixes and
underscores from a word.
Manual upper casing is preferred compared to the builtin function
str.capitalize() because we want to change only the case of the first
character and ignore the case of other characters. Firstly, all namespaces
are removed from the string. Then, constraint prefixes and underscores
are removed. Lastly, the first character is upper cased.
Args:
word: A string literal to capitalize and process.
Returns:
Returns a string that can be used in dcid generation.
Returns None if the string is empty.
"""
if word:
# Removing namespaces
word = word[word.find(':') + 1:]
# Removing constraint prefixes and replacing __ or & with 'Or'
word_list = _MULTIPLE_VALUE_SEPARATOR_REGEX.split(word)
for idx, w in enumerate(word_list):
word_list[idx] = _CONSTRAINT_PREFIX_REGEX.sub(
r'\g<ucase_uscore>', w)
word = 'Or'.join(word_list)
# Removing all underscores
word = word.replace('_', '')
# Upper casing the first character
word = word[0].upper() + word[1:]
return word
return None
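# Illustrative examples (not part of the original module), based on the
# constraint-prefix and multiple-value rules documented above:
#   _capitalize_process('USC_NonInstitutionalized') -> 'NonInstitutionalized'
#   _capitalize_process('Park__Playground')         -> 'ParkOrPlayground'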
def _generate_quantity_range_name(match_dict: dict) -> str:
"""Generate a name for a quantity range.
Args:
match_dict: A dictionary containing quantity range regex groups.
Expected syntax of match_dict is
{
'lower_limit': <value>,
'upper_limit': <value>,
'quantity': <value>
}
Returns:
A string representing the quantity range name to be used in the dcid.
Returns None if any of the expected keys are not in the dictionary.
"""
try:
lower_limit = match_dict['lower_limit']
upper_limit = match_dict['upper_limit']
quantity = match_dict['quantity']
except KeyError:
return None
# Joining word to be used when upper_limit or lower_limit is '-'
ul_conjunction = 'More'
ll_conjunction = 'Less'
quantity = _capitalize_process(quantity)
if quantity == 'Date': # Special case
ul_conjunction = 'Later'
ll_conjunction = 'Earlier'
if upper_limit == '-':
return f'{lower_limit}Or{ul_conjunction}{quantity}'
if lower_limit == '-':
return f'{upper_limit}Or{ll_conjunction}{quantity}'
return f'{lower_limit}To{upper_limit}{quantity}'
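# Illustrative examples (not part of the original module), using the group
# names produced by the quantity range regexes above:
#   _generate_quantity_range_name(
#       {'lower_limit': '10', 'upper_limit': '20', 'quantity': 'Years'})
#   -> '10To20Years'
#   _generate_quantity_range_name(
#       {'lower_limit': '10', 'upper_limit': '-', 'quantity': 'Years'})
#   -> '10OrMoreYears'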
def _naics_code_to_name(naics_val: str) -> str:
"""Converts NAICS codes to their industry using the _NAICS_MAP.
Args:
naics_val: A NAICS string literal to process.
Expected syntax of naics_val - NAICS/{codes}
'-' can be used to denote a range of codes that may or may not belong
to the same industry. For example, 44-45 will be mapped to 'RetailTrade'.
'_' can be used to represent multiple industries. For example, 51_52 will
be mapped to 'InformationFinanceInsurance'. A combination of '-' and
'_' is acceptable.
Returns:
A string with all NAICS codes changed to their respective industry.
This string can be used in dcid generation. Returns None if the string
is empty or if the string does not follow the expected syntax.
"""
# Helper function to process NAICS ranges
def _process_naics_range(range_str: str) -> str:
industry_str = ''
match = _NAICS_RANGE_REGEX.search(range_str)
m_dict = match.groupdict()
lower_limit = int(m_dict['lower_limit'])
upper_limit = int(m_dict['upper_limit'])
prev_str = None # To ensure the same industry is not added twice
for code in range(lower_limit, upper_limit + 1):
code_str = str(code)
if code_str in _NAICS_MAP and prev_str != _NAICS_MAP[code_str]:
industry_str = industry_str + _NAICS_MAP[code_str]
prev_str = _NAICS_MAP[code_str]
else:
continue
return industry_str
if naics_val:
processed_str = 'NAICS'
# Remove namespaces
naics_val = naics_val[naics_val.find(':') + 1:]
# Strip NAICS/
naics_val = naics_val.replace('NAICS/', '')
matches = _NAICS_CODE_REGEX.findall(naics_val)
if not matches:
return None
for match_str in matches:
if match_str.find('-') != -1: # Range
industry_str = _process_naics_range(match_str)
else:
industry_str = _NAICS_MAP[match_str]
processed_str = processed_str + industry_str
return processed_str
return None
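# Illustrative sketch (not part of the original module), using codes that are
# present in _NAICS_MAP and the syntax described in the docstring above:
#   _naics_code_to_name('NAICS/44-45') -> 'NAICSRetailTrade'
#   _naics_code_to_name('NAICS/51_52') -> 'NAICSInformationFinanceInsurance'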
def _soc_code_to_name(soc_val: str) -> str:
"""Converts SOCv2018 codes to their industry using the SOC_MAP from
soc_codes_names.py
Args:
soc_val: A SOCv2018 string literal to process.
Expected syntax of soc_val - SOCv2018/{code}
Returns:
A string with the SOC code changed to its occupation.
This string can be used in dcid generation. Returns the original string
if the code is not in the SOC_MAP. Returns None if the string is empty.
"""
if soc_val:
processed_str = soc_val
# Remove namespaces
soc_val_ns_removed = soc_val[soc_val.find(':') + 1:]
# Strip SOCv2018/ to get the code
soc_code = soc_val_ns_removed.replace('SOCv2018/', '')
if soc_code in SOC_MAP:
processed_str = 'SOC' + SOC_MAP[soc_code]
return processed_str
return None
def _prepend_append_replace(word,
prepend='',
append='',
replace='',
replacement=''):
"""Prepends, appends and replaces text in a word.
Args:
word: A string literal to prepend, append or replace on.
prepend: A string literal to prepend to word.
append: A string literal to append to word.
replace: A string literal that represents a substring in word to be
replaced.
replacement: A string literal. In word, all occurrences of replace will
be changed to replacement.
Returns:
A string after appending, prepending and replacing to word.
"""
if replace:
word = word.replace(replace, replacement)
if prepend:
word = prepend + word
if append:
word = word + append
return word
def _generate_quantity_name(match_dict: dict) -> str:
"""Generate a name for a quantity.
Args:
match_dict: A dictionary containing quantity regex groups.
Expected syntax of match_dict
{
'value': <value>,
'quantity': <value>
}
Returns:
A string representing the quantity name to be used in the dcid.
Returns None if any of the expected keys are not in the dictionary.
"""
try:
value = match_dict['value']
quantity = match_dict['quantity']
except KeyError:
return None
quantity = _capitalize_process(quantity)
return f'{value}{quantity}'
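# For example (not part of the original module), the notation [2 Person]
# matched by _QUANTITY_REGEX_1 yields {'value': '2', 'quantity': 'Person'}, so
#   _generate_quantity_name({'value': '2', 'quantity': 'Person'}) -> '2Person'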
def _generate_boolean_value_name(prop: str, value: str) -> str:
"""Generates a name given a boolean property and value.
Args:
prop: A string literal representing the boolean property name.
value: A string literal representing the boolean property value.
Returns:
A string that can be used in dcid generation
"""
if value in ('True', 'False'):
constraint_value = value == "True"
pop = None
prefix = None
if prop.startswith("has"):
pop = prop[3:]
prefix = "Has" if constraint_value else "No"
elif prop.startswith("is"):
pop = prop[2:]
prefix = "Is" if constraint_value else "Not"
else:
assert False, f"Unhandled prefix {prop}"
return prefix + pop
return None
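# Illustrative examples (not part of the original module), following the
# has*/is* prefix handling above:
#   _generate_boolean_value_name('isInternetUser', 'True')  -> 'IsInternetUser'
#   _generate_boolean_value_name('isInternetUser', 'False') -> 'NotInternetUser'
#   _generate_boolean_value_name('hasComputer', 'False')    -> 'NoComputer'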
def _process_constraint_property(prop: str, value: str) -> str:
"""Processes constraint property, value and returns a name that can be used
in dcid generation.
Args:
prop: A string literal representing the constraint property name.
value: A string literal representing the constraint property value.
Returns:
A string that can be used in dcid generation.
"""
if 'NAICS' in value:
name = _naics_code_to_name(value)
elif 'SOCv2018/' in value:
name = _soc_code_to_name(value)
elif prop in _BOOLEAN_PROPS:
name = _generate_boolean_value_name(prop, value)
else:
match1 = _QUANTITY_RANGE_REGEX_1.match(value)
match2 = _QUANTITY_RANGE_REGEX_2.match(value)
if match1 or match2: # Quantity Range
m_dict = match1.groupdict() if match1 else match2.groupdict()
name = _generate_quantity_range_name(m_dict)
else:
match1 = _QUANTITY_REGEX_1.match(value)
match2 = _QUANTITY_REGEX_2.match(value)
if match1 or match2: # Quantity
m_dict = match1.groupdict() if match1 else match2.groupdict()
name = _generate_quantity_name(m_dict)
else:
name = _capitalize_process(value)
if prop in _PREPEND_APPEND_REPLACE_MAP:
name = _prepend_append_replace(name,
**_PREPEND_APPEND_REPLACE_MAP[prop])
return name
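# Illustrative examples (not part of the original module), combining the
# helpers above with _PREPEND_APPEND_REPLACE_MAP:
#   _process_constraint_property('age', '[10 20 Years]') -> '10To20Years'
#   _process_constraint_property('householderRace', 'AsianAlone')
#   -> 'HouseholderRaceAsianAlone'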
def get_statvar_dcid(stat_var_dict: dict, ignore_props: list = None) -> str:
"""Generates the dcid given a statistical variable.
The generated dcid will follow the pattern
<statType>_<measuredProp>_<populationType>_<constraintVal1>_<constraintVal2>
1. measurementQualifier is added as a prefix to the dcid.
2. statType is included when it is not measuredValue.
3. measurementDenominator is added as a suffix to the dcid.
4. Constraints are sorted alphabetically based on the prop and values are
added to the dcid.
5. Existing dcids may not follow the above conventions. The _LEGACY_MAP maps
generated dcids to their existing dcid.
6. NAICS and SOC codes are replaced with their industry and occupation names
respectively. See _NAICS_MAP and util/soc_codes_names.py for the
mapping.
7. Boolean constraints are replaced by their populations. For example,
p=isInternetUser and v=True/False becomes v=IsInternetUser/
NotInternetUser. See _BOOLEAN_PROPS for the properties that are
considered for this renaming.
8. Quantities and Quantity Ranges are changed into a name to be used in the
dcid. For example p=age and v=[10 20 Years] becomes v=10To20Years.
9. Certain variables have text prepended or appended to their constraints to
improve readability. See _PREPEND_APPEND_REPLACE_MAP for more details.
Args:
stat_var_dict: A dictionary with property: value of the statistical
variable as key-value pairs.
ignore_props: A list of properties to ignore from stat_var_dict when
generating the dcid. This list of ignore_props will be added to the
default set of properties that are ignored. The ignore_props can be
used to account for dependent properties to ignore when generating
the dcid. For example in the following statVar,
{
populationType: Person
measuredProperty: count
statType: measuredValue
healthInsurance: NoHealthInsurance
armedForceStatus: Civilian
institutionalization: USC_NonInstitutionalized
}
since the healthInsurance property indicates they are Civilian and
USC_NonInstitutionalized, ignore_props can be the list
['armedForceStatus', 'institutionalization']. During the dcid
generation process, these properties will not be considered.
Returns:
A string representing the dcid of the statistical variable.
Caveats:
1. Currently, there is no support for renaming ICD10 cause of death
values and DEA drug names.
2. MeasuredProp=InsuredUnemploymentRate is not changed to
Rate_InsuredUnemployment.
3. The generated dcids can get too long due to the large number of
constraint props. In such cases, manual generation or the
ignore_props arg can be used to exclude a few props from the
generation process. It is recommended to limit the length of
statvar dcids to 80 characters or less.
4. This function does not differentiate between property names and only
uses the values to generate the dcid. Two props having the same
value, say p1=fuel, v1=Coal and p2=energy, v2=Coal will result in
the same dcid. The _PREPEND_APPEND_REPLACE_MAP can be modified to
disambiguate in this case.
"""
# TODO: Renaming cause of death properties
# TODO: Renaming DEA drug names
# TODO: InsuredUnemploymentRate should become Rate_InsuredUnemployment
# Helper function to add a property to the dcid list.
def add_prop_to_list(prop: str, svd: dict, dcid_list: list):
if prop in svd:
token = _capitalize_process(svd[prop])
if token is not None:
dcid_list.append(token)
svd.pop(prop, None)
dcid_list = []
denominator_suffix = ''
svd = copy.deepcopy(stat_var_dict)
if ignore_props is None:
ig_p = _DEFAULT_IGNORE_PROPS
else:
ig_p = copy.deepcopy(ignore_props)
ig_p.extend(_DEFAULT_IGNORE_PROPS)
for prop in ig_p:
svd.pop(prop, None)
# measurementQualifier is added as a prefix
add_prop_to_list('measurementQualifier', svd, dcid_list)
# Add statType if statType is not measuredValue
if ('statType' in svd) and (svd['statType'].find('measuredValue') == -1):
svd['statType'] = svd['statType'].replace('Value', '')
add_prop_to_list('statType', svd, dcid_list)
svd.pop('statType', None)
# Adding measuredProperty and populationType
add_prop_to_list('measuredProperty', svd, dcid_list)
add_prop_to_list('populationType', svd, dcid_list)
# measurementDenominator is added as a suffix
if 'measurementDenominator' in svd:
md = svd['measurementDenominator']
md = md[md.find(':') + 1:] # Removing namespaces
# Special case: PerCapita is directly appended.
if md == 'PerCapita':
denominator_suffix = 'PerCapita'
# MD that are properties (camelCase) are added as Per(MD)
# An example would be the property 'area' in Count_Person_PerArea
elif md[0].islower():
denominator_suffix = 'Per' + _capitalize_process(md)
# Everything else is AsAFractionOf
else:
denominator_suffix = 'AsAFractionOf_' + md
svd.pop('measurementDenominator', None)
# Adding constraint properties in alphabetical order
constraint_props = sorted(svd.keys(), key=str.casefold)
for prop in constraint_props:
name = _process_constraint_property(prop, svd[prop])
dcid_list.append(name)
if denominator_suffix:
dcid_list.append(denominator_suffix)
dcid = '_'.join(dcid_list)
dcid = _LEGACY_MAP.get(dcid, dcid)
return dcid
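# Minimal usage sketch (assumed, not from the original file): generating a
# dcid for a simple statistical variable. The exact string depends on the
# helper maps (e.g. _LEGACY_MAP), but under the rules documented above a
# statvar like the one below is expected to produce 'Count_Person_Female'.
#
#   sv = {
#       'populationType': 'Person',
#       'measuredProperty': 'count',
#       'statType': 'measuredValue',
#       'gender': 'Female',
#   }
#   dcid = get_statvar_dcid(sv)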
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import six
import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2
def get_all_op_protos():
"""
Get all registered op proto from PaddlePaddle C++ end.
:return: A list of registered OpProto.
"""
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
ret_values.append(op_proto)
return ret_values
def is_str(s):
return isinstance(s, six.string_types)
class OpDescCreationMethod(object):
"""
Convert the user's input(only keyword arguments are supported) to OpDesc
based on the OpProto.
:param op_proto: The OpProto object.
:type op_proto: framework_pb2.OpProto
"""
def __init__(self, op_proto):
if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError(
"Type of op_proto should be OpProto in PaddlePaddle.")
self.__op_proto__ = op_proto
def __call__(self, *args, **kwargs):
"""
Convert user's input to OpDesc. Only keyword arguments are supported.
:return: The OpDesc based on user input.
:rtype: framework_pb2.OpDesc
"""
if len(args) != 0:
raise ValueError("Only keyword arguments are supported.")
op_desc = framework_pb2.OpDesc()
for input_parameter in self.__op_proto__.inputs:
input_arguments = kwargs.get(input_parameter.name, [])
if is_str(input_arguments):
input_arguments = [input_arguments]
if not input_parameter.duplicable and len(input_arguments) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given." %
(input_parameter.name, len(input_arguments)))
ipt = op_desc.inputs.add()
ipt.parameter = input_parameter.name
ipt.arguments.extend(input_arguments)
for output_parameter in self.__op_proto__.outputs:
output_arguments = kwargs.get(output_parameter.name, [])
if is_str(output_arguments):
output_arguments = [output_arguments]
if not output_parameter.duplicable and len(output_arguments) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given." %
(output_parameter.name, len(output_arguments)))
out = op_desc.outputs.add()
out.parameter = output_parameter.name
out.arguments.extend(output_arguments)
# Types
op_desc.type = self.__op_proto__.type
# Attrs
for attr in self.__op_proto__.attrs:
if attr.generated:
continue
user_defined_attr = kwargs.get(attr.name, None)
if user_defined_attr is not None:
new_attr = op_desc.attrs.add()
new_attr.name = attr.name
new_attr.type = attr.type
if isinstance(user_defined_attr, np.ndarray):
user_defined_attr = user_defined_attr.tolist()
if attr.type == framework_pb2.INT:
new_attr.i = user_defined_attr
elif attr.type == framework_pb2.FLOAT:
new_attr.f = user_defined_attr
elif attr.type == framework_pb2.STRING:
new_attr.s = user_defined_attr
elif attr.type == framework_pb2.BOOLEAN:
new_attr.b = user_defined_attr
elif attr.type == framework_pb2.INTS:
new_attr.ints.extend(user_defined_attr)
elif attr.type == framework_pb2.FLOATS:
new_attr.floats.extend(user_defined_attr)
elif attr.type == framework_pb2.STRINGS:
new_attr.strings.extend(user_defined_attr)
elif attr.type == framework_pb2.BOOLEANS:
new_attr.bools.extend(user_defined_attr)
elif attr.type == framework_pb2.INT_PAIRS:
for p in user_defined_attr:
pair = new_attr.int_pairs.add()
pair.first = p[0]
pair.second = p[1]
else:
raise NotImplementedError(
"A not supported attribute type: %s." % (
str(attr.type)))
return op_desc
@staticmethod
def any_is_true(generator):
"""
Reduce a boolean array to a single boolean parameter. If any element in
the array is True, this function will return True, otherwise False.
"""
for flag in generator:
if flag:
return True
return False
class OpInfo(object):
def __init__(self, name, method, inputs, outputs, attrs):
self.name = name
self.method = method
self.inputs = inputs
self.outputs = outputs
self.attrs = attrs
def create_op_creation_method(op_proto):
"""
Generate op creation method for an OpProto.
"""
method = OpDescCreationMethod(op_proto)
def __impl__(*args, **kwargs):
opdesc = method(*args, **kwargs)
return core.Operator.create(opdesc.SerializeToString())
return OpInfo(
method=__impl__,
name=op_proto.type,
inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
attrs=[attr.name for attr in op_proto.attrs])
class OperatorFactory(object):
def __init__(self):
self.op_methods = dict()
for op_proto in get_all_op_protos():
method = create_op_creation_method(op_proto)
self.op_methods[method.name] = method
def __call__(self, *args, **kwargs):
if "type" in kwargs:
if len(args) != 0:
raise ValueError(
"Except the argument \"type\","
"all of the other arguments should be keyword arguments.")
t = kwargs.pop("type")
else:
if len(args) != 1:
raise ValueError(
"Except the argument \"type\","
"all of the other arguments should be keyword arguments.")
t = args[0]
return self.get_op_info(t).method(**kwargs)
def types(self):
return list(self.op_methods.keys())
def get_op_info(self, t):
if t not in self.op_methods:
raise ValueError("The operator: %s is not registered." % t)
return self.op_methods.get(t)
def get_op_input_names(self, type):
return [x[0] for x in self.get_op_info(type).inputs]
def get_op_inputs(self, type):
return self.get_op_info(type).inputs
def get_op_output_names(self, type):
return [x[0] for x in self.get_op_info(type).outputs]
def get_op_outputs(self, type):
return self.get_op_info(type).outputs
def get_op_attr_names(self, type):
return self.get_op_info(type).attrs
class __RecurrentOp__(object):
__proto__ = None
type = "recurrent"
def __init__(self):
# cache recurrent_op's proto
if self.__proto__ is None:
for op_proto in get_all_op_protos():
if op_proto.type == self.type:
self.__proto__ = op_proto
def __call__(self, *args, **kwargs):
if self.type not in args and "type" not in kwargs:
kwargs["type"] = self.type
# create proto
create_method = OpDescCreationMethod(self.__proto__)
proto = create_method(*args, **kwargs)
# create rnnop
return core.RecurrentOp.create(proto.SerializeToString())
class __DynamicRecurrentOp__(object):
__proto__ = None
type = "dynamic_recurrent"
def __init__(self):
# cache recurrent_op's proto
if self.__proto__ is None:
for op_proto in get_all_op_protos():
if op_proto.type == self.type:
self.__proto__ = op_proto
def __call__(self, *args, **kwargs):
if self.type not in args and "type" not in kwargs:
kwargs["type"] = self.type
# create proto
create_method = OpDescCreationMethod(self.__proto__)
proto = create_method(*args, **kwargs)
# create rnnop
return core.DynamicRecurrentOp.create(proto.SerializeToString())
class __CondOp__(object):
__proto__ = None
type = "cond"
def __init__(self):
# cache recurrent_op's proto
if self.__proto__ is None:
for op_proto in get_all_op_protos():
if op_proto.type == self.type:
self.__proto__ = op_proto
def __call__(self, *args, **kwargs):
if self.type not in args and "type" not in kwargs:
kwargs["type"] = self.type
# create proto
create_method = OpDescCreationMethod(self.__proto__)
proto = create_method(*args, **kwargs)
# create condop
return core.CondOp.create(proto.SerializeToString())
Operator = OperatorFactory() # The default global factory
RecurrentOp = __RecurrentOp__()
DynamicRecurrentOp = __DynamicRecurrentOp__()
CondOp = __CondOp__()
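# Usage sketch (illustrative only; "some_op" is a hypothetical operator type,
# and the available input/output/attribute names depend on which operators are
# registered in the C++ core):
#
#   op = Operator("some_op", X="x", Out="out")
#   input_names = Operator.get_op_input_names("some_op")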
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Plots the blend between low- and high-incidence aero databases.
Examples:
- Variation of aerodynamic coefficients with alpha, at beta = 5 deg:
bazel run //analysis/aero:plot_aero_blend -- \
--alpha_degs '-15.0,15.0,31' \
--beta_degs '5.0,5.0,1'
- Variation of aerodynamic coefficients with beta, at alpha = 5 deg:
bazel run //analysis/aero:plot_aero_blend -- \
--beta_degs '-15.0,15.0,31' \
--alpha_degs '5.0,5.0,1'
The program will assert out if both alpha_degs and beta_degs contain more
than one element.
"""
import re
import sys
import gflags
import makani
from makani.lib.python import flag_types
from makani.sim.physics import physics
from makani.system import labels
import matplotlib
matplotlib.use('QT4Agg')
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
import numpy as np
makani.SetRunfilesDirFromBinaryPath()
_AERO_OUTPUTS = ['Cx', 'Cy', 'Cz', 'Cl', 'Cm', 'Cn', 'CL', 'CD']
_AERO_VARS = ['alpha', 'beta', 'p', 'q', 'r', 'ail', 'ele', 'rud']
_VALID_SPEC_DESCRIPTION = (
'Valid specifiers are of the form "<output>" or "d<output>/d<var>", where '
'<output> is one of %s, and <var> is one of %s.' % (_AERO_OUTPUTS,
_AERO_VARS))
gflags.DEFINE_float('re', 5e6, 'Reynolds number.')
gflags.DEFINE_integer('fig_rows', 4, 'Number of rows in figure grid.')
gflags.DEFINE_integer('fig_cols', 4, 'Number of columns in figure grid.')
flag_types.DEFINE_linspace('alpha_degs', '0.0, 12.0, 49',
'Linspace of alpha values in degrees.')
flag_types.DEFINE_linspace('beta_degs', '0.0, 0.0, 1',
'Linspace of beta values in degrees.')
gflags.DEFINE_list('specs',
['CL', 'CD', 'Cy', 'Cl', 'Cm', 'Cn',
'dCL/dalpha', 'dCm/dalpha', 'dCm/dq',
'dCl/dail', 'dCm/dele', 'dCn/drud'],
'Comma-separated list of specifiers for values to plot. '
+ _VALID_SPEC_DESCRIPTION)
gflags.DEFINE_float('thrust_coeff', 0.0, 'Total thrust coefficient.')
FLAGS = gflags.FLAGS
class ProgrammerError(Exception):
"""Indicates that whoever wrote the code screwed up.
But they were nice enough to check whether they did.
"""
pass
def IsDerivativeSpec(spec):
match = re.match(r'^d(\w+)/d(\w+)$', spec)
return (match and match.group(1) in _AERO_OUTPUTS
and match.group(2) in _AERO_VARS)
def IsValidSpec(spec):
return spec in _AERO_OUTPUTS or IsDerivativeSpec(spec)
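# Illustrative examples of the specifier format checked above (the first two
# values are taken from the default --specs flag):
#
#   IsValidSpec('CL')         # plain aero output
#   IsValidSpec('dCm/dq')     # derivative of an output w.r.t. a variable
#   IsValidSpec('dFoo/dbar')  # rejected: neither part is a known name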
class BlendDatum(object):
"""A piece of data recorded for low/high-incidence and blended aero models.
This is essentially just a dictionary with keys KEYS. It exists primarily
as documentation.
"""
KEYS = ['low', 'high', 'blended']
def __init__(self, fcn):
self._datum = {key: fcn(key) for key in self.KEYS}
def __getitem__(self, key):
return self._datum[key]
def CalcBlendForceMomentCoeffs(aero_model, alpha, beta=0.0,
omega_hat=(0.0, 0.0, 0.0),
flaps=((0.0,) * labels.kNumFlaps),
reynolds_number=None):
"""Calculates a BlendDatum of force-moment coefficients.
Args:
aero_model: A physics.Aero instance.
alpha: Angle-of-attack [rad].
beta: Sideslip angle [rad].
omega_hat: Length-3 object of body rates [rad/s].
flaps: Length-8 object of flap deflections [rad].
reynolds_number: Reynolds number [#].
Returns:
BlendDatum of force-moment coefficients.
"""
omega_hat_vec3 = physics.Vec3()
omega_hat_vec3.x, omega_hat_vec3.y, omega_hat_vec3.z = omega_hat
flaps_vec = physics.VecWrapper(labels.kNumFlaps)
for i, flap in enumerate(flaps):
flaps_vec.SetValue(i, flap)
if reynolds_number is None:
reynolds_number = FLAGS.re
cfms = BlendDatum(lambda key: physics.ForceMoment())
thrust_coeff = FLAGS.thrust_coeff
aero_model.CalcLowIncidenceCoeffs(alpha, beta, omega_hat_vec3,
flaps_vec.GetVec(), reynolds_number,
cfms['low'].this, thrust_coeff)
aero_model.CalcHighIncidenceCoeffs(alpha, beta, omega_hat_vec3,
flaps_vec.GetVec(), reynolds_number,
cfms['high'].this, thrust_coeff)
aero_model.CalcForceMomentCoeff(alpha, beta, omega_hat_vec3,
flaps_vec.GetVec(), reynolds_number,
cfms['blended'].this, thrust_coeff)
return cfms
def RotBToW(cf_b, alpha, beta):
cf_w = physics.Vec3()
physics.RotBToW(cf_b.this, alpha, beta, cf_w.this)
return cf_w
def AeroOutputGetter(name):
"""Returns a function mapping from (CFM, alpha, beta) to an aero output."""
assert name in _AERO_OUTPUTS, ('Invalid value "%s". Must be one of %s.'
                               % (name, _AERO_OUTPUTS))
if name == 'Cx':
return lambda cfm, alpha, beta: cfm.force.x
elif name == 'Cy':
return lambda cfm, alpha, beta: cfm.force.y
elif name == 'Cz':
return lambda cfm, alpha, beta: cfm.force.z
if name == 'Cl':
return lambda cfm, alpha, beta: cfm.moment.x
elif name == 'Cm':
return lambda cfm, alpha, beta: cfm.moment.y
elif name == 'Cn':
return lambda cfm, alpha, beta: cfm.moment.z
elif name == 'CL':
return lambda cfm, alpha, beta: -RotBToW(cfm.force, alpha, beta).z
elif name == 'CD':
return lambda cfm, alpha, beta: -RotBToW(cfm.force, alpha, beta).x
else:
raise ProgrammerError('Case "%s" is not handled.' % name)
def CalcAeroOutput(aero_model, spec, alpha, beta,
omega_hat=(0.0, 0.0, 0.0),
flaps=((0.0,) * labels.kNumFlaps)):
getter = AeroOutputGetter(spec)
cfms = CalcBlendForceMomentCoeffs(aero_model, alpha, beta=beta,
omega_hat=omega_hat, flaps=flaps)
return BlendDatum(lambda key: getter(cfms[key], alpha, beta))
def CalcAeroDerivative(aero_model, spec, alpha, beta,
omega_hat=(0.0, 0.0, 0.0),
flaps=((0.0,) * labels.kNumFlaps),
reynolds_number=None):
"""Calculates an aerodynamic derivative.
Args:
aero_model: A physics.Aero instance.
spec: A string specifier for the derivative, of the form
d[output]/d[var]. E.g. 'dCL/dalpha' or 'dCm/dq'. See
_AERO_OUTPUTS and _AERO_VARS for allowed values.
alpha: Angle-of-attack [rad].
beta: Sideslip angle [rad].
omega_hat: Length-3 object of body rates [rad/s].
flaps: Length-8 object of flap deflections [rad].
reynolds_number: Reynolds number [#].
Returns:
BlendDatum of the aerodynamic derivative.
Raises:
ProgrammerError: In case of a coding mistake.
"""
assert IsDerivativeSpec(spec), 'Invalid specifier: "%s"' % spec
if reynolds_number is None:
reynolds_number = FLAGS.re
omega_hat = np.array(omega_hat)
flaps = np.array(list(flaps))
numerator, denominator = spec.split('/')
getter = AeroOutputGetter(numerator[1:])
var = denominator[1:]
# Step size for finite differences. This is in either [rad] or [rad/s],
# depending on what we're differentiating with respect to.
h = 0.01
dalpha, dbeta, domega_hat = 0.0, 0.0, np.zeros(3)
dflaps = np.zeros(labels.kNumFlaps)
if var == 'alpha':
dalpha = h
elif var == 'beta':
dbeta = h
elif var == 'p':
domega_hat[0] = h
elif var == 'q':
domega_hat[1] = h
elif var == 'r':
domega_hat[2] = h
elif var == 'ail':
dflaps = h * np.array([1.0, 1.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0])
elif var == 'ele':
dflaps[labels.kFlapEle] = h
elif var == 'rud':
dflaps[labels.kFlapRud] = h
else:
raise ProgrammerError('Case "%s" is not handled.' % var)
cfms_0 = CalcBlendForceMomentCoeffs(aero_model, alpha, beta=beta,
omega_hat=omega_hat, flaps=flaps,
reynolds_number=reynolds_number)
cfms_1 = CalcBlendForceMomentCoeffs(aero_model, alpha + dalpha,
beta=beta + dbeta,
omega_hat=omega_hat + domega_hat,
flaps=flaps + dflaps,
reynolds_number=reynolds_number)
def CalcDerivative(key):
return np.deg2rad((getter(cfms_1[key], alpha + dalpha, beta + dbeta)
- getter(cfms_0[key], alpha, beta)) / h)
return BlendDatum(CalcDerivative)
def Plot(aero_model, alpha_degs, beta_degs, spec):
"""Plots an aerodynamic quantity against angle-of-attack or sideslip.
Args:
aero_model: A physics.Aero instance.
alpha_degs: An array of alpha values [deg].
beta_degs: An array of beta values [deg].
spec: Either an _AERO_OUTPUT or a derivative spec (see CalcAeroDerivative).
This function will assert out if both alpha_degs and beta_degs contain more
than one element.
"""
assert (np.size(alpha_degs) == 1 or np.size(beta_degs) == 1), (
'Invalid inputs. alpha_degs or beta_degs must contain a single element.')
if IsDerivativeSpec(spec):
calc_function = CalcAeroDerivative
else:
calc_function = CalcAeroOutput
blend_data = BlendDatum(lambda key: list())
for alpha_rad in np.deg2rad(alpha_degs):
for beta_rad in np.deg2rad(beta_degs):
datum = calc_function(aero_model, spec, alpha_rad, beta_rad)
for key in BlendDatum.KEYS:
blend_data[key].append(datum[key])
if np.size(alpha_degs) > 1:
abscissa = alpha_degs
x_label = 'Angle-of-attack [deg]'
else:
abscissa = beta_degs
x_label = 'Angle-of-sideslip [deg]'
pyplot.plot(abscissa, blend_data['low'], 'b.:', label='Low incidence')
pyplot.plot(abscissa, blend_data['high'], 'g.:', label='High incidence')
pyplot.plot(abscissa, blend_data['blended'], 'r.-', label='Blended')
pyplot.legend(loc='best').draggable()
pyplot.title(spec, fontsize=20)
pyplot.xlabel(x_label)
pyplot.grid(linewidth=0.5)
pyplot.gcf().canvas.set_window_title(spec)
def TileFigures(num_figures, num_rows=None, num_cols=None):
if num_rows is None:
num_rows = FLAGS.fig_rows
if num_cols is None:
num_cols = FLAGS.fig_cols
for i in range(num_figures):
offset_count, i_linear = divmod(i, num_rows * num_cols)
i_row, i_col = divmod(i_linear, num_cols)
manager = pyplot.figure(i).canvas.manager
manager.toolbar.pan()
manager.toolbar.hide()
width, height = 500, 410
offset = 30
manager.window.setGeometry(width * i_col + offset * offset_count,
height * i_row + offset * offset_count,
width, height - 40)
def main(argv):
# Parse flags.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '\nError: %s\n' % e
print 'Usage: %s\n%s' % (e, FLAGS)
sys.exit(1)
specs = FLAGS.specs
for spec in specs:
if not IsValidSpec(spec):
raise RuntimeError('Invalid spec: %s. %s' % (spec,
_VALID_SPEC_DESCRIPTION))
aero_model = physics.Aero(physics.GetAeroSimParams())
for i, spec in enumerate(specs):
pyplot.figure(i)
Plot(aero_model, FLAGS.alpha_degs, FLAGS.beta_degs, spec)
TileFigures(len(specs))
pyplot.show()
if __name__ == '__main__':
main(sys.argv)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
from oslo.config import cfg
from cloudbaseinit.openstack.common import importutils
from cloudbaseinit.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().call(CONF, context, topic, msg, timeout)
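# Usage sketch (assumed, not from this module): the topic and method names
# below are placeholders; the msg dict must follow the documented form.
#
#   result = call(context, 'compute',
#                 {'method': 'get_instance', 'args': {'instance_id': 42}})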
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resources in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
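# Examples, following the convention documented above:
#
#   queue_get_for(context, 'foo', 'host1')  # -> 'foo.host1'
#   queue_get_for(context, 'foo', None)     # -> 'foo'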
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import abc
import urllib
from keystoneclient import exceptions
# Python 2.4 compat
try:
all
except NameError:
def all(iterable):
return True not in (not x for x in iterable)
def getid(obj):
"""
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
# Try to return the object's UUID first, if we have a UUID.
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
class Manager(object):
"""
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, api):
self.api = api
def _list(self, url, response_key, obj_class=None, body=None):
resp = None
if body:
resp, body = self.api.post(url, body=body)
else:
resp, body = self.api.get(url)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
if type(data) is dict:
data = data['values']
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key):
resp, body = self.api.get(url)
return self.resource_class(self, body[response_key], loaded=True)
def _head(self, url):
resp, body = self.api.head(url)
return resp.status_code == 204
def _create(self, url, body, response_key, return_raw=False):
resp, body = self.api.post(url, body=body)
if return_raw:
return body[response_key]
return self.resource_class(self, body[response_key])
def _delete(self, url):
resp, body = self.api.delete(url)
def _update(self, url, body=None, response_key=None, method="PUT",
management=True):
methods = {"PUT": self.api.put,
"POST": self.api.post,
"PATCH": self.api.patch}
try:
if body is not None:
resp, body = methods[method](url, body=body,
management=management)
else:
resp, body = methods[method](url, management=management)
except KeyError:
raise exceptions.ClientException("Invalid update method: %s"
% method)
# PUT requests may not return a body
if body:
return self.resource_class(self, body[response_key])
class ManagerWithFind(Manager):
"""
Like a `Manager`, but with additional `find()`/`findall()` methods.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""
Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
rl = self.findall(**kwargs)
num = len(rl)
if num == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
def findall(self, **kwargs):
"""
Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
class CrudManager(Manager):
"""Base manager class for manipulating Keystone entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
If a `base_url` is provided, the generated URL will be appended to it.
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
def _filter_kwargs(self, kwargs):
# drop null values
for key, ref in kwargs.copy().iteritems():
if ref is None:
kwargs.pop(key)
else:
id_value = getid(ref)
if id_value != ref:
kwargs.pop(key)
kwargs['%s_id' % key] = id_value
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._create(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._update(
self.build_url(base_url=base_url, **kwargs),
method='PUT')
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._update(
self.build_url(**kwargs),
{self.key: params},
self.key,
method='PATCH')
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
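# Sketch of a concrete manager (hypothetical, for illustration only), showing
# how collection_key/key drive build_url:
#
#   class EntityManager(CrudManager):
#       resource_class = Resource
#       collection_key = 'entities'
#       key = 'entity'
#
#   manager = EntityManager(api)
#   manager.build_url()                 # -> '/entities'
#   manager.build_url(entity_id='123')  # -> '/entities/123'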
class Resource(object):
"""
A resource represents a particular instance of an object (tenant, user,
etc). This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
def __init__(self, manager, info, loaded=False):
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def _add_details(self, info):
for (k, v) in info.iteritems():
setattr(self, k, v)
def __getattr__(self, k):
if k not in self.__dict__:
#NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
def delete(self):
return self.manager.delete(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
|
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Translation API."""
import six
from google.cloud.client import Client as BaseClient
from google.cloud.translate_v2._http import Connection
ENGLISH_ISO_639 = "en"
"""ISO 639-1 language code for English."""
BASE = "base"
"""Base translation model."""
NMT = "nmt"
"""Neural Machine Translation model."""
class Client(BaseClient):
"""Client to bundle configuration needed for API requests.
:type target_language: str
:param target_language: (Optional) The target language used for
translations and language names. (Defaults to
:data:`ENGLISH_ISO_639`.)
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~requests.Session`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
SCOPE = ("https://www.googleapis.com/auth/cloud-platform",)
"""The scopes required for authenticating."""
def __init__(self, target_language=ENGLISH_ISO_639, credentials=None, _http=None):
self.target_language = target_language
super(Client, self).__init__(credentials=credentials, _http=_http)
self._connection = Connection(self)
def get_languages(self, target_language=None):
"""Get list of supported languages for translation.
See
https://cloud.google.com/translate/docs/discovering-supported-languages
:type target_language: str
:param target_language: (Optional) The language used to localize
returned language names. Defaults to the
target language on the current client.
:rtype: list
:returns: List of dictionaries. Each dictionary contains a supported
ISO 639-1 language code (using the dictionary key
``language``). If ``target_language`` is passed, each
dictionary will also contain the name of each supported
language (localized to the target language).
"""
query_params = {}
if target_language is None:
target_language = self.target_language
if target_language is not None:
query_params["target"] = target_language
response = self._connection.api_request(
method="GET", path="/languages", query_params=query_params
)
return response.get("data", {}).get("languages", ())
def detect_language(self, values):
"""Detect the language of a string or list of strings.
See https://cloud.google.com/translate/docs/detecting-language
:type values: str or list
:param values: String or list of strings that will have
language detected.
:rtype: dict or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys
* ``confidence``: The confidence in language detection, a
float between 0 and 1.
* ``input``: The corresponding input value.
* ``language``: The detected language (as an ISO 639-1
language code).
though the key ``confidence`` may not always be present.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
detections is not equal to the number of values.
:class:`ValueError <exceptions.ValueError>` if a value
produces a list of detections with 0 or multiple results
in it.
"""
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
data = {"q": values}
response = self._connection.api_request(
method="POST", path="/detect", data=data
)
detections = response.get("data", {}).get("detections", ())
if len(values) != len(detections):
raise ValueError(
"Expected same number of values and detections", values, detections
)
for index, value in enumerate(values):
# Empirically, even clearly ambiguous text like "no" only returns
# a single detection, so we replace the list of detections with
# the single detection contained.
if len(detections[index]) == 1:
detections[index] = detections[index][0]
else:
message = (
"Expected a single detection per value, API " "returned %d"
) % (len(detections[index]),)
raise ValueError(message, value, detections[index])
detections[index]["input"] = value
# The ``isReliable`` field is deprecated.
detections[index].pop("isReliable", None)
if single_value:
return detections[0]
else:
return detections
def translate(
self,
values,
target_language=None,
format_=None,
source_language=None,
customization_ids=(),
model=None,
):
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str
:param source_language: (Optional) The language of the text to
be translated.
:type customization_ids: str or list
:param customization_ids: (Optional) ID or list of customization IDs
for translation. Sets the ``cid`` parameter
in the query.
:type model: str
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains the following keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
"""
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
if target_language is None:
target_language = self.target_language
if isinstance(customization_ids, six.string_types):
customization_ids = [customization_ids]
data = {
"target": target_language,
"q": values,
"cid": customization_ids,
"format": format_,
"source": source_language,
"model": model,
}
response = self._connection.api_request(method="POST", path="", data=data)
translations = response.get("data", {}).get("translations", ())
if len(values) != len(translations):
raise ValueError(
"Expected iterations to have same length", values, translations
)
for value, translation in six.moves.zip(values, translations):
translation["input"] = value
if single_value:
return translations[0]
else:
return translations
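# Usage sketch (illustrative; requires valid Google Cloud credentials):
#
#   client = Client(target_language='de')
#   client.detect_language('Hello')             # -> {'language': 'en', 'input': 'Hello', ...}
#   client.translate('Hello', format_='text')   # -> {'translatedText': ..., 'input': 'Hello'}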
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MRUListEx Windows Registry plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.lib import timelib_test
from plaso.parsers.winreg_plugins import mrulistex
from plaso.parsers.winreg_plugins import test_lib
from plaso.winreg import interface as winreg_interface
from plaso.winreg import test_lib as winreg_test_lib
class TestMRUListExStringPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the string MRUListEx plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulistex.MRUListExStringPlugin()
def testProcess(self):
"""Tests the Process function."""
key_path = u'\\Microsoft\\Some Windows\\InterestingApp\\MRUlist'
values = []
# The order is: 201
values.append(winreg_test_lib.TestRegValue(
'MRUListEx', '\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00',
winreg_interface.WinRegValue.REG_BINARY, 123))
values.append(winreg_test_lib.TestRegValue(
'0', 'Some random text here'.encode('utf_16_le'),
winreg_interface.WinRegValue.REG_SZ, 1892))
values.append(winreg_test_lib.TestRegValue(
'1', 'c:\\evil.exe'.encode('utf_16_le'),
winreg_interface.WinRegValue.REG_BINARY, 612))
values.append(winreg_test_lib.TestRegValue(
'2', 'C:\\looks_legit.exe'.encode('utf_16_le'),
winreg_interface.WinRegValue.REG_SZ, 1001))
winreg_key = winreg_test_lib.TestRegKey(
key_path, 1346145829002031, values, 1456)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 1)
# A MRUListEx event object.
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-08-28 09:23:49.002031')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'[{0:s}] '
u'Index: 1 [MRU Value 2]: C:\\looks_legit.exe '
u'Index: 2 [MRU Value 0]: Some random text here '
u'Index: 3 [MRU Value 1]: c:\\evil.exe').format(key_path)
expected_msg_short = (
u'[{0:s}] Index: 1 [MRU Value 2]: C:\\l...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class TestMRUListExShellItemListPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the shell item list MRUListEx plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulistex.MRUListExShellItemListPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])
key_path = (
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'OpenSavePidlMRU')
winreg_key = self._GetKeyFromFile(test_file, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 65)
# A MRUListEx event object.
event_object = event_objects[40]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2011-08-28 22:48:28.159308')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'[{0:s}\\exe] '
u'Index: 1 [MRU Value 1]: Shell item list: [My Computer, P:\\, '
u'Application Tools, Firefox 6.0, Firefox Setup 6.0.exe] '
u'Index: 2 [MRU Value 0]: Shell item list: [Computers and Devices, '
u'UNKNOWN: 0x00, \\\\controller\\WebDavShare, Firefox Setup 3.6.12.exe'
u']').format(key_path)
expected_msg_short = (
u'[\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'OpenSavePidlMRU...')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# A shell item event object.
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-03-08 22:16:02')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'Name: ALLOYR~1 '
u'Long name: Alloy Research '
u'NTFS file reference: 44518-33 '
u'Origin: {0:s}\\*').format(key_path)
expected_msg_short = (
u'Name: ALLOYR~1 '
u'NTFS file reference: 44518-33 '
u'Origin: \\Software\\Microsoft\\Wind...')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class TestMRUListExStringAndShellItemPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item MRUListEx plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulistex.MRUListExStringAndShellItemPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])
key_path = (
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\RecentDocs')
winreg_key = self._GetKeyFromFile(test_file, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 6)
# A MRUListEx event object.
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-04-01 13:52:39.113741')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'[{0:s}] '
u'Index: 1 [MRU Value 17]: Path: The SHIELD, '
u'Shell item: [The SHIELD.lnk] '
u'Index: 10 [MRU Value 11]: Path: 5031RR_BalancedLeadership.pdf, '
u'Shell item: [5031RR_BalancedLeadership.lnk] '
u'Index: 11 [MRU Value 10]: '
u'Path: SA-23E Mitchell-Hyundyne Starfury.docx, '
u'Shell item: [SA-23E Mitchell-Hyundyne Starfury.lnk] '
u'Index: 12 [MRU Value 9]: Path: StarFury.docx, '
u'Shell item: [StarFury (3).lnk] '
u'Index: 13 [MRU Value 6]: Path: StarFury.zip, '
u'Shell item: [StarFury.lnk] '
u'Index: 14 [MRU Value 4]: Path: VIBRANIUM.docx, '
u'Shell item: [VIBRANIUM.lnk] '
u'Index: 15 [MRU Value 5]: Path: ADAMANTIUM-Background.docx, '
u'Shell item: [ADAMANTIUM-Background.lnk] '
u'Index: 16 [MRU Value 3]: Path: Pictures, '
u'Shell item: [Pictures.lnk] '
u'Index: 17 [MRU Value 2]: Path: nick_fury_77831.jpg, '
u'Shell item: [nick_fury_77831.lnk] '
u'Index: 18 [MRU Value 1]: Path: Downloads, '
u'Shell item: [Downloads.lnk] '
u'Index: 19 [MRU Value 0]: Path: wallpaper_medium.jpg, '
u'Shell item: [wallpaper_medium.lnk] '
u'Index: 2 [MRU Value 18]: '
u'Path: captain_america_shield_by_almogrem-d48x9x8.jpg, '
u'Shell item: [captain_america_shield_by_almogrem-d48x9x8.lnk] '
u'Index: 3 [MRU Value 16]: Path: captain-america-shield-front.jpg, '
u'Shell item: [captain-america-shield-front.lnk] '
u'Index: 4 [MRU Value 12]: Path: Leadership, '
u'Shell item: [Leadership.lnk] '
u'Index: 5 [MRU Value 15]: Path: followership.pdf, '
u'Shell item: [followership.lnk] '
u'Index: 6 [MRU Value 14]: Path: leaderqualities.pdf, '
u'Shell item: [leaderqualities.lnk] '
u'Index: 7 [MRU Value 13]: Path: htlhtl.pdf, '
u'Shell item: [htlhtl.lnk] '
u'Index: 8 [MRU Value 8]: Path: StarFury, '
u'Shell item: [StarFury (2).lnk] '
u'Index: 9 [MRU Value 7]: Path: Earth_SA-26_Thunderbolt.jpg, '
u'Shell item: [Earth_SA-26_Thunderbolt.lnk]').format(key_path)
expected_msg_short = (
u'[{0:s}] Index: 1 [MR...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class TestMRUListExStringAndShellItemListPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item list MRUListEx plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mrulistex.MRUListExStringAndShellItemListPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])
key_path = (
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'LastVisitedPidlMRU')
winreg_key = self._GetKeyFromFile(test_file, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 31)
# A MRUListEx event object.
event_object = event_objects[30]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-04-01 13:52:38.966290')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'[{0:s}] '
u'Index: 1 [MRU Value 1]: Path: chrome.exe, '
u'Shell item list: [Users Libraries, UNKNOWN: 0x00, UNKNOWN: 0x00, '
u'UNKNOWN: 0x00] '
u'Index: 2 [MRU Value 7]: '
u'Path: {{48E1ED6B-CF49-4609-B1C1-C082BFC3D0B4}}, '
u'Shell item list: [Shared Documents Folder (Users Files), '
u'UNKNOWN: 0x00, Alloy Research] '
u'Index: 3 [MRU Value 6]: '
u'Path: {{427865A0-03AF-4F25-82EE-10B6CB1DED3E}}, '
u'Shell item list: [Users Libraries, UNKNOWN: 0x00, UNKNOWN: 0x00] '
u'Index: 4 [MRU Value 5]: '
u'Path: {{24B5C9BB-48B5-47FF-8343-40481DBA1E2B}}, '
u'Shell item list: [My Computer, C:\\, Users, nfury, Documents] '
u'Index: 5 [MRU Value 4]: '
u'Path: {{0B8CFE96-DB69-4D33-8E3C-36EAB4F709E0}}, '
u'Shell item list: [My Computer, C:\\, Users, nfury, Documents, '
u'Alloy Research] '
u'Index: 6 [MRU Value 3]: '
u'Path: {{D4F85F66-003D-4127-BCE9-CAD7A57B2857}}, '
u'Shell item list: [Users Libraries, UNKNOWN: 0x00, UNKNOWN: 0x00] '
u'Index: 7 [MRU Value 0]: Path: iexplore.exe, '
u'Shell item list: [My Computer, P:\\, Application Tools, Firefox 6.0] '
u'Index: 8 [MRU Value 2]: Path: Skype.exe, '
u'Shell item list: [Users Libraries, UNKNOWN: 0x00]').format(key_path)
expected_msg_short = (
u'[\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'LastVisitedPidl...')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
|
|
import hashlib
from struct import *
from pyelliptic import arithmetic
from binascii import hexlify, unhexlify
import logging

# Module logger used by encodeVarint() and decodeAddress() below. The original
# project wires in its own logger; a standard-library logger is used here so
# the module is self-contained.
logger = logging.getLogger(__name__)
#There is another copy of this function in Bitmessagemain.py
def convertIntToString(n):
a = __builtins__.hex(n)
if a[-1:] == 'L':
a = a[:-1]
if (len(a) % 2) == 0:
return unhexlify(a[2:])
else:
return unhexlify('0'+a[2:])
ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
def encodeBase58(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
#print 'num is:', num
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def decodeBase58(string, alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
num = 0
try:
for char in string:
num *= base
num += alphabet.index(char)
except:
#character not found (like a space character or a 0)
return 0
return num
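# Round-trip sketch for the helpers above:
#
#   encodeBase58(0)                     # -> '1' (first alphabet character)
#   decodeBase58(encodeBase58(123456))  # -> 123456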
def encodeVarint(integer):
if integer < 0:
logger.error('varint cannot be < 0')
raise SystemExit
if integer < 253:
return pack('>B',integer)
if integer >= 253 and integer < 65536:
return pack('>B',253) + pack('>H',integer)
if integer >= 65536 and integer < 4294967296:
return pack('>B',254) + pack('>I',integer)
if integer >= 4294967296 and integer < 18446744073709551616:
return pack('>B',255) + pack('>Q',integer)
if integer >= 18446744073709551616:
logger.error('varint cannot be >= 18446744073709551616')
raise SystemExit
class varintDecodeError(Exception):
pass
def decodeVarint(data):
"""
Decodes an encoded varint to an integer and returns it.
Per protocol v3, the encoded value must be encoded with
the minimum amount of data possible or else it is malformed.
Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes)
"""
if len(data) == 0:
return (0,0)
firstByte, = unpack('>B',data[0:1])
if firstByte < 253:
# encodes 0 to 252
return (firstByte,1) #the 1 is the length of the varint
if firstByte == 253:
# encodes 253 to 65535
if len(data) < 3:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 3.' % (firstByte, len(data)))
encodedValue, = unpack('>H',data[1:3])
if encodedValue < 253:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,3)
if firstByte == 254:
# encodes 65536 to 4294967295
if len(data) < 5:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 5.' % (firstByte, len(data)))
encodedValue, = unpack('>I',data[1:5])
if encodedValue < 65536:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,5)
if firstByte == 255:
# encodes 4294967296 to 18446744073709551615
if len(data) < 9:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 9.' % (firstByte, len(data)))
encodedValue, = unpack('>Q',data[1:9])
if encodedValue < 4294967296:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,9)
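# decodeVarint mirrors the encoder and enforces minimal encoding (worked
# examples, not part of the original module):
#   decodeVarint(unhexlify('fc'))     == (252, 1)
#   decodeVarint(unhexlify('fd00fd')) == (253, 3)
#   decodeVarint(unhexlify('fd00fc')) raises varintDecodeError, because 252
#   must be encoded in a single byte under protocol v3.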
def calculateInventoryHash(data):
sha = hashlib.new('sha512')
sha2 = hashlib.new('sha512')
sha.update(data)
sha2.update(sha.digest())
return sha2.digest()[0:32]
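# calculateInventoryHash is the first 32 bytes of a double SHA-512, i.e. the
# same as hashlib.sha512(hashlib.sha512(data).digest()).digest()[:32].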
def encodeAddress(version,stream,ripe):
if version >= 2 and version < 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
if ripe[:2] == '\x00\x00':
ripe = ripe[2:]
elif ripe[:1] == '\x00':
ripe = ripe[1:]
elif version == 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
ripe = ripe.lstrip('\x00')
storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe
# Generate the checksum
sha = hashlib.new('sha512')
sha.update(storedBinaryData)
currentHash = sha.digest()
sha = hashlib.new('sha512')
sha.update(currentHash)
checksum = sha.digest()[0:4]
asInt = int(hexlify(storedBinaryData) + hexlify(checksum),16)
return 'BM-'+ encodeBase58(asInt)
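# Address layout produced by encodeAddress (summary derived from the code
# above): 'BM-' + base58(varint(version) + varint(stream) + ripe + checksum),
# where checksum is the first 4 bytes of a double SHA-512 over the preceding
# bytes, and leading NULL bytes of the ripe hash are dropped (at most two for
# versions 2-3, all of them for version 4).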
def decodeAddress(address):
#returns (status, address version number, stream number, data (almost certainly a ripe hash))
address = str(address).strip()
if address[:3] == 'BM-':
integer = decodeBase58(address[3:])
else:
integer = decodeBase58(address)
if integer == 0:
status = 'invalidcharacters'
return status,0,0,""
    #after converting to hex, the string is prepended with 0x; longs also get a trailing L
    hexdata = hex(integer)[2:]
    if hexdata.endswith('L'):
        hexdata = hexdata[:-1]
if len(hexdata) % 2 != 0:
hexdata = '0' + hexdata
#print 'hexdata', hexdata
data = unhexlify(hexdata)
checksum = data[-4:]
sha = hashlib.new('sha512')
sha.update(data[:-4])
currentHash = sha.digest()
#print 'sha after first hashing: ', sha.hexdigest()
sha = hashlib.new('sha512')
sha.update(currentHash)
#print 'sha after second hashing: ', sha.hexdigest()
if checksum != sha.digest()[0:4]:
status = 'checksumfailed'
return status,0,0,""
#else:
# print 'checksum PASSED'
try:
addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print 'addressVersionNumber', addressVersionNumber
#print 'bytesUsedByVersionNumber', bytesUsedByVersionNumber
if addressVersionNumber > 4:
logger.error('cannot decode address version numbers this high')
status = 'versiontoohigh'
return status,0,0,""
elif addressVersionNumber == 0:
logger.error('cannot decode address version numbers of zero.')
status = 'versiontoohigh'
return status,0,0,""
try:
streamNumber, bytesUsedByStreamNumber = decodeVarint(data[bytesUsedByVersionNumber:])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print streamNumber
status = 'success'
if addressVersionNumber == 1:
return status,addressVersionNumber,streamNumber,data[-24:-4]
elif addressVersionNumber == 2 or addressVersionNumber == 3:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if len(embeddedRipeData) == 19:
return status,addressVersionNumber,streamNumber,'\x00'+embeddedRipeData
elif len(embeddedRipeData) == 20:
return status,addressVersionNumber,streamNumber,embeddedRipeData
elif len(embeddedRipeData) == 18:
return status,addressVersionNumber,streamNumber,'\x00\x00'+embeddedRipeData
elif len(embeddedRipeData) < 18:
return 'ripetooshort',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
else:
return 'otherproblem',0,0,""
elif addressVersionNumber == 4:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if embeddedRipeData[0:1] == '\x00':
# In order to enforce address non-malleability, encoded RIPE data must have NULL bytes removed from the front
return 'encodingproblem',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
elif len(embeddedRipeData) < 4:
return 'ripetooshort',0,0,""
else:
x00string = '\x00' * (20 - len(embeddedRipeData))
return status,addressVersionNumber,streamNumber,x00string+embeddedRipeData
def addBMIfNotPresent(address):
address = str(address).strip()
if address[:3] != 'BM-':
return 'BM-'+address
else:
return address
if __name__ == "__main__":
print 'Let us make an address from scratch. Suppose we generate two random 32 byte values and call the first one the signing key and the second one the encryption key:'
privateSigningKey = '93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665'
privateEncryptionKey = '4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a'
print 'privateSigningKey =', privateSigningKey
print 'privateEncryptionKey =', privateEncryptionKey
print 'Now let us convert them to public keys by doing an elliptic curve point multiplication.'
publicSigningKey = arithmetic.privtopub(privateSigningKey)
publicEncryptionKey = arithmetic.privtopub(privateEncryptionKey)
print 'publicSigningKey =', publicSigningKey
print 'publicEncryptionKey =', publicEncryptionKey
    print 'Notice that they both begin with the \\x04 which specifies the encoding type. This prefix is not sent over the wire. You must strip it off before you send your public key across the wire, and you must add it back when you receive a public key.'
publicSigningKeyBinary = arithmetic.changebase(publicSigningKey,16,256,minlen=64)
publicEncryptionKeyBinary = arithmetic.changebase(publicEncryptionKey,16,256,minlen=64)
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha512')
sha.update(publicSigningKeyBinary+publicEncryptionKeyBinary)
ripe.update(sha.digest())
addressVersionNumber = 2
streamNumber = 1
print 'Ripe digest that we will encode in the address:', hexlify(ripe.digest())
returnedAddress = encodeAddress(addressVersionNumber,streamNumber,ripe.digest())
print 'Encoded address:', returnedAddress
status,addressVersionNumber,streamNumber,data = decodeAddress(returnedAddress)
print '\nAfter decoding address:'
print 'Status:', status
print 'addressVersionNumber', addressVersionNumber
print 'streamNumber', streamNumber
print 'length of data(the ripe hash):', len(data)
print 'ripe data:', hexlify(data)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import ad_group_ad
from google.ads.googleads.v8.services.types import ad_group_ad_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdGroupAdServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupAdServiceGrpcTransport
class AdGroupAdServiceClientMeta(type):
"""Metaclass for the AdGroupAdService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdGroupAdServiceTransport]]
_transport_registry["grpc"] = AdGroupAdServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdGroupAdServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupAdServiceClient(metaclass=AdGroupAdServiceClientMeta):
"""Service to manage ads in an ad group."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
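    # Illustrative conversions (not part of the generated code):
    #   "googleads.googleapis.com"         -> "googleads.mtls.googleapis.com"
    #   "googleads.sandbox.googleapis.com" -> "googleads.mtls.sandbox.googleapis.com"
    #   "localhost:8080" has no googleapis.com suffix and is returned unchanged.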
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupAdServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupAdServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_path(customer_id: str, ad_id: str,) -> str:
"""Return a fully-qualified ad string."""
return "customers/{customer_id}/ads/{ad_id}".format(
customer_id=customer_id, ad_id=ad_id,
)
@staticmethod
def parse_ad_path(path: str) -> Dict[str, str]:
"""Parse a ad path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ads/(?P<ad_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def ad_group_path(customer_id: str, ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id,
)
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str, str]:
"""Parse a ad_group path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def ad_group_ad_path(
customer_id: str, ad_group_id: str, ad_id: str,
) -> str:
"""Return a fully-qualified ad_group_ad string."""
return "customers/{customer_id}/adGroupAds/{ad_group_id}~{ad_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id, ad_id=ad_id,
)
@staticmethod
def parse_ad_group_ad_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_ad path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupAds/(?P<ad_group_id>.+?)~(?P<ad_id>.+?)$",
path,
)
return m.groupdict() if m else {}
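    # Example round trip for the helpers above (IDs are made up):
    #   ad_group_ad_path("1234", "5678", "91011")
    #       == "customers/1234/adGroupAds/5678~91011"
    #   parse_ad_group_ad_path("customers/1234/adGroupAds/5678~91011")
    #       == {"customer_id": "1234", "ad_group_id": "5678", "ad_id": "91011"}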
@staticmethod
def ad_group_ad_label_path(
customer_id: str, ad_group_id: str, ad_id: str, label_id: str,
) -> str:
"""Return a fully-qualified ad_group_ad_label string."""
return "customers/{customer_id}/adGroupAdLabels/{ad_group_id}~{ad_id}~{label_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
ad_id=ad_id,
label_id=label_id,
)
@staticmethod
def parse_ad_group_ad_label_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_ad_label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupAdLabels/(?P<ad_group_id>.+?)~(?P<ad_id>.+?)~(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdGroupAdServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group ad service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupAdServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupAdServiceTransport):
# transport is a AdGroupAdServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupAdServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_ad(
self,
request: ad_group_ad_service.GetAdGroupAdRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_ad.AdGroupAd:
r"""Returns the requested ad in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdGroupAdRequest`):
The request object. Request message for
[AdGroupAdService.GetAdGroupAd][google.ads.googleads.v8.services.AdGroupAdService.GetAdGroupAd].
resource_name (:class:`str`):
Required. The resource name of the ad
to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdGroupAd:
An ad group ad.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_ad_service.GetAdGroupAdRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_ad_service.GetAdGroupAdRequest):
request = ad_group_ad_service.GetAdGroupAdRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad_group_ad]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
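    # Hedged usage sketch (made-up IDs; `service` is assumed to be an
    # AdGroupAdServiceClient, typically obtained via
    # GoogleAdsClient.get_service("AdGroupAdService")):
    #   resource_name = service.ad_group_ad_path("1234", "5678", "91011")
    #   ad_group_ad = service.get_ad_group_ad(resource_name=resource_name)
    #   print(ad_group_ad.ad.id)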
def mutate_ad_group_ads(
self,
request: ad_group_ad_service.MutateAdGroupAdsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_group_ad_service.AdGroupAdOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_ad_service.MutateAdGroupAdsResponse:
r"""Creates, updates, or removes ads. Operation statuses are
returned.
List of thrown errors: `AdCustomizerError <>`__ `AdError <>`__
`AdGroupAdError <>`__ `AdSharingError <>`__ `AdxError <>`__
`AssetError <>`__ `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`CollectionSizeError <>`__ `ContextError <>`__
`DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
`FeedAttributeReferenceError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `FunctionError <>`__
`FunctionParsingError <>`__ `HeaderError <>`__ `IdError <>`__
`ImageError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MediaBundleError <>`__ `MediaFileError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperationAccessDeniedError <>`__
`OperatorError <>`__ `PolicyFindingError <>`__
`PolicyValidationParameterError <>`__
`PolicyViolationError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `ResourceCountLimitExceededError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateAdGroupAdsRequest`):
The request object. Request message for
[AdGroupAdService.MutateAdGroupAds][google.ads.googleads.v8.services.AdGroupAdService.MutateAdGroupAds].
customer_id (:class:`str`):
Required. The ID of the customer
whose ads are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdGroupAdOperation]`):
Required. The list of operations to
perform on individual ads.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateAdGroupAdsResponse:
Response message for an ad group ad
mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_ad_service.MutateAdGroupAdsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_ad_service.MutateAdGroupAdsRequest):
request = ad_group_ad_service.MutateAdGroupAdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_ad_group_ads
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
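    # Hedged usage sketch for mutate_ad_group_ads (illustrative only, with
    # made-up IDs; `googleads_client` is assumed to be a configured
    # GoogleAdsClient and `service` an instance of this client class):
    #   operation = googleads_client.get_type("AdGroupAdOperation")
    #   operation.remove = service.ad_group_ad_path("1234", "5678", "91011")
    #   response = service.mutate_ad_group_ads(
    #       customer_id="1234", operations=[operation])
    #   print(response.results[0].resource_name)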
__all__ = ("AdGroupAdServiceClient",)
|
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
User Interface base classes and themes.
"""
import sys
import os
import time
from io import BytesIO
from pycopia import environ
from pycopia import cliutils
from pycopia import tty
from pycopia.fsm import FSM, ANY
# set the PROMPT ignore depending on whether or not readline module is
# available.
try:
import readline
PROMPT_START_IGNORE = '\001'
PROMPT_END_IGNORE = '\002'
except ImportError:
readline = None
PROMPT_START_IGNORE = ''
PROMPT_END_IGNORE = ''
from types import MethodType, FunctionType
class UIError(Exception):
pass
class UIFindError(UIError):
pass
# themes define some basic "look and feel" for a CLI. This includes prompt
# strings and a color set.
class Theme(object):
NORMAL = RESET = ""
BOLD = BRIGHT = ""
BLACK = ""
RED = ""
GREEN = ""
YELLOW = ""
BLUE = ""
MAGENTA = ""
CYAN = ""
WHITE = ""
DEFAULT = ""
GREY = ""
BRIGHTRED = ""
BRIGHTGREEN = ""
BRIGHTYELLOW = ""
BRIGHTBLUE = ""
BRIGHTMAGENTA = ""
BRIGHTCYAN = ""
BRIGHTWHITE = ""
UNDERSCORE = ""
BLINK = ""
help_local = WHITE
help_inherited = YELLOW
help_created = GREEN
def __init__(self, ps1="> ", ps2="more> ", ps3="choose", ps4="-> "):
self._ps1 = ps1 # main prompt
self._ps2 = ps2 # more input needed
self._ps3 = ps3 # choose prompt
self._ps4 = ps4 # input prompt
self._setcolors()
def _set_ps1(self, new):
self._ps1 = str(new)
def _set_ps2(self, new):
self._ps2 = str(new)
def _set_ps3(self, new):
self._ps3 = str(new)
def _set_ps4(self, new):
self._ps4 = str(new)
_setcolors = classmethod(lambda c: None)
ps1 = property(lambda s: s._ps1, _set_ps1, None, "primary prompt")
ps2 = property(lambda s: s._ps2, _set_ps2, None, "more input needed")
ps3 = property(lambda s: s._ps3, _set_ps3, None, "choose prompt")
ps4 = property(lambda s: s._ps4, _set_ps4, None, "text input prompt")
class BasicTheme(Theme):
def _setcolors(cls):
"Base class for themes. Defines interface."
cls.NORMAL = cls.RESET = "\x1b[0m"
cls.BOLD = cls.BRIGHT = "\x1b[1m"
cls.BLACK = ""
cls.RED = ""
cls.GREEN = ""
cls.YELLOW = ""
cls.BLUE = ""
cls.MAGENTA = ""
cls.CYAN = ""
cls.WHITE = ""
cls.DEFAULT = ""
cls.GREY = ""
cls.BRIGHTRED = ""
cls.BRIGHTGREEN = ""
cls.BRIGHTYELLOW = ""
cls.BRIGHTBLUE = ""
cls.BRIGHTMAGENTA = ""
cls.BRIGHTCYAN = ""
cls.BRIGHTWHITE = ""
cls.UNDERSCORE = "\x1b[4m"
cls.BLINK = "\x1b[5m"
cls.help_local = cls.WHITE
cls.help_inherited = cls.YELLOW
cls.help_created = cls.GREEN
_setcolors = classmethod(_setcolors)
class ANSITheme(BasicTheme):
"""Defines tunable parameters for the UserInterface, to provide
different color schemes and prompts.
"""
def _setcolors(cls):
# ANSI escapes for color terminals
cls.NORMAL = cls.RESET = "\x1b[0m"
cls.BOLD = cls.BRIGHT = "\x1b[01m"
cls.BLACK = "\x1b[30m"
cls.RED = "\x1b[31m"
cls.GREEN = "\x1b[32m"
cls.YELLOW = "\x1b[33m"
cls.BLUE = "\x1b[34m"
cls.MAGENTA = "\x1b[35m"
cls.CYAN = "\x1b[36m"
cls.WHITE = "\x1b[37m"
cls.GREY = "\x1b[30;01m"
cls.BRIGHTRED = "\x1b[31;01m"
cls.BRIGHTGREEN = "\x1b[32;01m"
cls.BRIGHTYELLOW = "\x1b[33;01m"
cls.BRIGHTBLUE = "\x1b[34;01m"
cls.BRIGHTMAGENTA = "\x1b[35;01m"
cls.BRIGHTCYAN = "\x1b[36;01m"
cls.BRIGHTWHITE = "\x1b[37;01m"
cls.DEFAULT = "\x1b[39;49m"
cls.UNDERSCORE = "\x1b[4m"
cls.BLINK = "\x1b[5m"
cls.help_local = cls.BRIGHTWHITE
cls.help_inherited = cls.YELLOW
cls.help_created = cls.GREEN
_setcolors = classmethod(_setcolors)
DefaultTheme = ANSITheme
class UserInterface(object):
"""An ANSI terminal user interface for CLIs. """
def __init__(self, io, env=None, theme=None):
self.set_IO(io)
self._env = env or environ.Environ()
assert hasattr(self._env, "get")
self._env["_"] = None
self._cache = {}
self.set_theme(theme)
self._initfsm()
self.initialize()
def set_IO(self, io):
self._io = io
if io.isatty():
self._termlen, self._termwidth, x, y = tty.get_winsize(io.fileno())
else:
self._termlen, self._termwidth = 24, 80
def get_IO(self):
return self._io
def _del_IO(self):
self._io = None
IO = property(get_IO, set_IO, _del_IO)
def __del__(self):
try:
self.finalize()
except:
pass
def initialize(self, *args):
pass
def finalize(self):
pass
def close(self):
if self._io is not None:
self._io.close()
self._io = None
def set_environ(self, env):
assert hasattr(env, "get")
self._env = env
self._env["_"] = None
def set_theme(self, theme):
self._theme = theme or DefaultTheme()
assert isinstance(self._theme, Theme), "must supply a Theme object."
self._env.setdefault("PS1", self._theme.ps1)
self._env.setdefault("PS2", self._theme.ps2)
self._env.setdefault("PS3", self._theme.ps3)
self._env.setdefault("PS4", self._theme.ps4)
def clone(self, theme=None):
return self.__class__(self._io, self._env.copy(), theme or self._theme)
# output methods
def Print(self, *objs):
wr = self._io.write
if objs:
try:
for obj in objs[:-1]:
wr(str(obj))
wr(" ")
last = objs[-1]
if last is not None: # don't NL if last value is None (works like trailing comma).
wr(str(last))
wr("\n")
except tty.PageQuitError:
return
else:
wr("\n")
self._io.flush()
def pprint(self, obj):
self._format(obj, 0, 0, set(), 0)
self._io.write("\n")
self._io.flush()
def print_obj(self, obj, nl=1):
if nl:
self._io.write("%s\n" % (obj,))
else:
self._io.write(str(obj))
self._io.flush()
def print_list(self, clist, indent=0):
if clist:
width = self._termwidth - 9
indent = min(max(indent,0),width)
ps = " " * indent
try:
for c in clist[:-1]:
cs = "%s, " % (c,)
if len(ps) + len(cs) > width:
self.print_obj(ps)
ps = "%s%s" % (" " * indent, cs)
else:
ps += cs
self.print_obj("%s%s" % (ps, clist[-1]))
except tty.PageQuitError:
pass
def write(self, text):
self._io.write(text)
def printf(self, text):
"Print text run through the expansion formatter."
self.Print(self.format(text))
def error(self, text):
self.printf("%%r%s%%N" % (text,))
def warning(self, text):
self.printf("%%Y%s%%N" % (text,))
# user input
def _get_prompt(self, name, prompt=None):
return self.prompt_format(prompt or self._env[name])
def user_input(self, prompt=None):
return self._io.raw_input(self._get_prompt("PS1", prompt))
def more_user_input(self):
return self._io.raw_input(self._get_prompt("PS2"))
def choose(self, somelist, defidx=0, prompt=None):
return cliutils.choose(somelist,
defidx,
self._get_prompt("PS3", prompt),
input=self._io.raw_input, error=self.error)
def choose_value(self, somemap, default=None, prompt=None):
return cliutils.choose_value(somemap,
default,
self._get_prompt("PS3", prompt),
input=self._io.raw_input, error=self.error)
def choose_key(self, somemap, default=None, prompt=None):
return cliutils.choose_key(somemap,
default,
self._get_prompt("PS3", prompt),
input=self._io.raw_input, error=self.error)
def choose_multiple(self, somelist, chosen=None, prompt=None):
return cliutils.choose_multiple(somelist,
chosen,
self._get_prompt("PS3", prompt),
input=self._io.raw_input, error=self.error)
def choose_multiple_from_map(self, somemap, chosen=None, prompt=None):
return cliutils.choose_multiple_from_map(somemap,
chosen,
self._get_prompt("PS3", prompt),
input=self._io.raw_input, error=self.error)
def get_text(self, msg=None):
return cliutils.get_text(self._get_prompt("PS4"), msg, input=self._io.raw_input)
def get_value(self, prompt, default=None):
return cliutils.get_input(self.prompt_format(prompt), default, self._io.raw_input)
def edit_text(self, text, prompt=None):
return cliutils.edit_text(text, self._get_prompt("PS4", prompt))
def get_int(self, prompt="", default=None):
return cliutils.get_int(prompt, default, input=self._io.raw_input, error=self.error)
def get_float(self, prompt="", default=None):
return cliutils.get_float(prompt, default, input=self._io.raw_input, error=self.error)
def get_bool(self, prompt="", default=None):
return cliutils.get_bool(prompt, default, input=self._io.raw_input, error=self.error)
def yes_no(self, prompt, default=True):
while 1:
yesno = cliutils.get_input(self.prompt_format(prompt), "Y" if default else "N", self._io.raw_input)
yesno = yesno.upper()
if yesno.startswith("Y"):
return True
elif yesno.startswith("N"):
return False
else:
self.Print("Please enter yes or no.")
def get_key(self, prompt=""):
return tty.get_key(prompt)
def get_password(self, prompt="Password: "):
return tty.getpass(prompt)
def get_winsize(self):
rows, cols, xpixel, ypixel = tty.get_winsize(self._io.fileno())
return rows, cols
# docstring/help formatters
def _format_doc(self, s, color):
i = s.find("\n")
if i > 0:
return color + s[:i] + self._theme.NORMAL + self.format(s[i:]) + "\n"
else:
return color + s + self._theme.NORMAL + "\n"
def help_local(self, text):
self.Print(self._format_doc(text, self._theme.help_local))
def help_inherited(self, text):
self.Print(self._format_doc(text, self._theme.help_inherited))
def help_created(self, text):
self.Print(self._format_doc(text, self._theme.help_created))
def prompt_format(self, ps):
"Expand percent-exansions in a string for readline prompts."
self._fsm.process_string(ps)
return self._getarg()
def format(self, ps):
"Expand percent-exansions in a string and return the result."
self._ffsm.process_string(ps)
if self._ffsm.arg:
arg = self._ffsm.arg
self._ffsm.arg = ''
return arg
else:
return None
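    # Sketch of the percent-expansion behaviour (derived from the
    # _FORMAT_EXPANSIONS table built in _initfsm below):
    #   ui.format("%rerror%N")  -> RED escape + "error" + NORMAL escape
    #   ui.format("%%")         -> a literal "%"
    #   ui.format("%{PS1}")     -> the value of the PS1 entry in the environment
    # prompt_format() behaves the same but wraps the escapes in readline's
    # ignore markers so prompt-width calculations stay correct.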
def format_wrap(self, obj, formatstring):
return FormatWrapper(obj, self, formatstring)
def register_prompt_expansion(self, key, func):
"""Register a percent-expansion function for the prompt format method. The
function must take one argument, and return a string. The argument is
the character expanded on.
"""
key = str(key)[0]
        if key not in self._PROMPT_EXPANSIONS:
self._PROMPT_EXPANSIONS[key] = func
else:
raise ValueError("expansion key %r already exists." % (key, ))
def register_format_expansion(self, key, func):
"""Register a percent-expansion function for the format method. The
function must take one argument, and return a string. The argument is
the character expanded on.
"""
key = str(key)[0]
if key not in self._FORMAT_EXPANSIONS:
self._FORMAT_EXPANSIONS[key] = func
else:
raise ValueError("expansion key %r already exists." % (key, ))
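    # Example registration (hypothetical key, shown only as a sketch):
    #   ui.register_format_expansion("v", lambda c: "1.0")
    #   ui.format("version %v")  ->  "version 1.0"
    # register_prompt_expansion() works the same way for prompt strings, and
    # both methods raise ValueError if the key is already registered.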
def unregister_format_expansion(self, key):
key = str(key)[0]
try:
del self._FORMAT_EXPANSIONS[key]
except KeyError:
pass
# FSM for prompt expansion
def _initfsm(self):
# maps percent-expansion items to some value.
theme = self._theme
self._PROMPT_EXPANSIONS = { # used in prompt strings given to readline library.
"I":PROMPT_START_IGNORE + theme.BRIGHT + PROMPT_END_IGNORE,
"N":PROMPT_START_IGNORE + theme.NORMAL + PROMPT_END_IGNORE,
"D":PROMPT_START_IGNORE + theme.DEFAULT + PROMPT_END_IGNORE,
"R":PROMPT_START_IGNORE + theme.BRIGHTRED + PROMPT_END_IGNORE,
"G":PROMPT_START_IGNORE + theme.BRIGHTGREEN + PROMPT_END_IGNORE,
"Y":PROMPT_START_IGNORE + theme.BRIGHTYELLOW + PROMPT_END_IGNORE,
"B":PROMPT_START_IGNORE + theme.BRIGHTBLUE + PROMPT_END_IGNORE,
"M":PROMPT_START_IGNORE + theme.BRIGHTMAGENTA + PROMPT_END_IGNORE,
"C":PROMPT_START_IGNORE + theme.BRIGHTCYAN + PROMPT_END_IGNORE,
"W":PROMPT_START_IGNORE + theme.BRIGHTWHITE + PROMPT_END_IGNORE,
"r":PROMPT_START_IGNORE + theme.RED + PROMPT_END_IGNORE,
"g":PROMPT_START_IGNORE + theme.GREEN + PROMPT_END_IGNORE,
"y":PROMPT_START_IGNORE + theme.YELLOW + PROMPT_END_IGNORE,
"b":PROMPT_START_IGNORE + theme.BLUE + PROMPT_END_IGNORE,
"m":PROMPT_START_IGNORE + theme.MAGENTA + PROMPT_END_IGNORE,
"c":PROMPT_START_IGNORE + theme.CYAN + PROMPT_END_IGNORE,
"w":PROMPT_START_IGNORE + theme.WHITE + PROMPT_END_IGNORE,
"n":"\n", "l":self._tty, "h":self._hostname, "u":self._username,
"$": self._priv, "d":self._cwd, "L": self._shlvl, "t":self._time,
"T":self._date}
self._FORMAT_EXPANSIONS = {
"I": theme.BRIGHT,
"N": theme.NORMAL,
"D": theme.DEFAULT,
"R": theme.BRIGHTRED,
"G": theme.BRIGHTGREEN,
"Y": theme.BRIGHTYELLOW,
"B": theme.BRIGHTBLUE,
"M": theme.BRIGHTMAGENTA,
"C": theme.BRIGHTCYAN,
"W": theme.BRIGHTWHITE,
"r": theme.RED,
"g": theme.GREEN,
"y": theme.YELLOW,
"b": theme.BLUE,
"m": theme.MAGENTA,
"c": theme.CYAN,
"w": theme.WHITE,
"n":"\n", "l":self._tty, "h":self._hostname, "u":self._username,
"$": self._priv, "d":self._cwd, "L": self._shlvl, "t":self._time,
"T":self._date}
fp = FSM(0)
fp.add_default_transition(self._error, 0)
# add text to args
fp.add_transition(ANY, 0, self._addtext, 0)
# percent escapes
fp.add_transition("%", 0, None, 1)
fp.add_transition("%", 1, self._addtext, 0)
fp.add_transition("{", 1, self._startvar, 2)
fp.add_transition("}", 2, self._endvar, 0)
fp.add_transition(ANY, 2, self._vartext, 2)
fp.add_transition(ANY, 1, self._prompt_expand, 0)
fp.arg = ''
self._fsm = fp
ff = FSM(0)
ff.add_default_transition(self._error, 0)
# add text to args
ff.add_transition(ANY, 0, self._addtext, 0)
# percent escapes
ff.add_transition("%", 0, None, 1)
ff.add_transition("%", 1, self._addtext, 0)
ff.add_transition("{", 1, self._startvar, 2)
ff.add_transition("}", 2, self._endvar, 0)
ff.add_transition(ANY, 2, self._vartext, 2)
ff.add_transition(ANY, 1, self._format_expand, 0)
ff.arg = ''
self._ffsm = ff
def _startvar(self, c, fsm):
fsm.varname = ""
def _vartext(self, c, fsm):
fsm.varname += c
def _endvar(self, c, fsm):
fsm.arg += str(self._env.get(fsm.varname, fsm.varname))
def _prompt_expand(self, c, fsm):
return self._expand(c, fsm, self._PROMPT_EXPANSIONS)
def _format_expand(self, c, fsm):
return self._expand(c, fsm, self._FORMAT_EXPANSIONS)
def _expand(self, c, fsm, mapping):
try:
arg = self._cache[c]
except KeyError:
try:
arg = mapping[c]
except KeyError:
arg = c
else:
if callable(arg):
arg = str(arg(c))
fsm.arg += arg
def _username(self, c):
un = os.environ.get("USERNAME") or os.environ.get("USER")
if un:
self._cache[c] = un
return un
def _shlvl(self, c):
return str(self._env.get("SHLVL", ""))
def _hostname(self, c):
hn = os.uname()[1]
self._cache[c] = hn
return hn
def _priv(self, c):
if os.getuid() == 0:
arg = "#"
else:
arg = ">"
self._cache[c] = arg
return arg
def _tty(self, c):
n = os.ttyname(self._io.fileno())
self._cache[c] = n
return n
def _cwd(self, c):
return os.getcwd()
def _time(self, c):
return time.strftime("%H:%M:%S", time.localtime())
def _date(self, c):
return time.strftime("%m/%d/%Y", time.localtime())
def _error(self, input_symbol, fsm):
self._io.errlog('Prompt string error: %s\n%r' % (input_symbol, fsm.stack))
fsm.reset()
def _addtext(self, c, fsm):
fsm.arg += c
def _getarg(self):
if self._fsm.arg:
arg = self._fsm.arg
self._fsm.arg = ''
return arg
else:
return None
# pretty printing
def _format(self, obj, indent, allowance, context, level):
level = level + 1
objid = id(obj)
if objid in context:
self._io.write(_recursion(obj))
return
rep = self._repr(obj, context, level - 1)
typ = type(obj)
sep_lines = len(rep) > (self._termwidth - 1 - indent - allowance)
write = self._io.write
if sep_lines:
if typ is dict:
write('{\n ')
length = len(obj)
if length:
context[objid] = 1
indent = indent + 2
items = obj.items()
items.sort()
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, indent + len(rep) + 2, allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self._format(ent, indent + len(rep) + 2, allowance + 1, context, level)
indent = indent - 2
del context[objid]
write('\n}')
return
if typ is list:
write('[\n')
self.print_list(obj, 2)
write(']')
return
if typ is tuple:
write('(\n')
self.print_list(obj, 2)
if len(obj) == 1:
write(',')
write(')')
return
write(rep)
def _repr(self, obj, context, level):
return self._safe_repr(obj, context.copy(), None, level)
def _safe_repr(self, obj, context, maxlevels, level):
return _safe_repr(obj, context, maxlevels, level)
class FormatWrapper(object):
"""Wrap any object with a format.
The format string should have an '%O' component that will be expanded to
the stringified object given here.
"""
def __init__(self, obj, ui, format):
self.value = obj
self._ui = ui
self._format = format
def __str__(self):
self._ui.register_format_expansion("O", self._str_value)
try:
return self._ui.format(self._format)
finally:
self._ui.unregister_format_expansion("O")
def _str_value(self, c):
return str(self.value)
def __len__(self):
return len(str(self.value))
def __repr__(self):
return _safe_repr(self.value, set(), None, 0)
def __cmp__(self, other):
if type(other) is FormatWrapper:
return cmp(self.value, other.value)
else:
return cmp(self.value, other)
def safe_repr(value):
"""Return a representational string of the given object.
    Large or recursive objects are detected and clipped.
"""
return _safe_repr(value, set(), None, 0)
# Return repr_string
def _safe_repr(obj, context, maxlevels, level):
typ = type(obj)
if typ is str:
if 'locale' not in sys.modules:
return repr(obj)
if "'" in obj and '"' not in obj:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = BytesIO()
write = sio.write
for char in obj:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure))
if typ is dict:
if not obj:
return "{}"
objid = id(obj)
if maxlevels and level > maxlevels:
return "{...}"
if objid in context:
return _recursion(obj)
context[objid] = 1
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in obj.iteritems():
krepr = saferepr(k, context, maxlevels, level)
vrepr = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
del context[objid]
return "{%s}" % ", ".join(components)
if typ is list or typ is tuple:
if typ is list:
if not obj:
return "[]"
format = "[%s]"
elif len(obj) == 1:
format = "(%s,)"
else:
if not obj:
return "()"
format = "(%s)"
objid = id(obj)
if maxlevels and level > maxlevels:
return format % "..."
if objid in context:
return _recursion(obj)
context[objid] = 1
components = []
append = components.append
level += 1
for o in obj:
orepr = _safe_repr(o, context, maxlevels, level)
append(orepr)
del context[objid]
return format % ", ".join(components)
if typ is MethodType:
return method_repr(obj)
if typ is FunctionType:
return function_repr(obj)
return repr(obj)
def _recursion(obj):
return ("<Recursion on %s with id=%s>" % (type(obj).__name__, id(obj)))
def method_repr(method):
methname = method.im_func.func_name
# formal names
varnames = list(method.im_func.func_code.co_varnames)[:method.im_func.func_code.co_argcount]
if method.im_func.func_defaults:
ld = len(method.im_func.func_defaults)
varlist = [", ".join(varnames[:-ld]),
", ".join(["%s=%r" % (n, v) for n, v in zip(varnames[-ld:], method.im_func.func_defaults)])]
return "%s(%s)" % (methname, ", ".join(varlist))
else:
return "%s(%s)" % (methname, ", ".join(varnames))
def function_repr(func):
methname = func.func_name
# formal names
argcount = func.func_code.co_argcount
varnames = list(func.func_code.co_varnames)[:argcount]
if func.func_defaults:
ld = len(func.func_defaults)
varlist = varnames[:-ld]
varlist.extend(["%s=%r" % (n, v) for n, v in zip(varnames[-ld:], func.func_defaults)])
return "%s(%s)" % (methname, ", ".join(varlist))
else:
return "%s(%s)" % (methname, ", ".join(varnames))
def _get_object(name):
try:
return getattr(sys.modules[__name__], name)
except AttributeError:
i = name.rfind(".")
if i >= 0:
modname = name[:i]
try:
mod = sys.modules[modname]
except KeyError:
try:
mod = __import__(modname, globals(), locals(), ["*"])
except ImportError as err:
raise UIFindError("Could not find UI module %s: %s" % (modname, err))
try:
return getattr(mod, name[i+1:])
except AttributeError:
raise UIFindError("Could not find UI object %r in module %r." % (name, modname))
else:
raise UIFindError("%s is not a valid object path." % (name,))
# construct a user interface from object names given as strings.
def get_userinterface(uiname="UserInterface",
ioname="IO.ConsoleIO", themename=None):
if type(ioname) is str:
ioobj = _get_object(ioname)
elif hasattr(ioname, "write"):
ioobj = ioname
else:
raise ValueError("ioname not a valid type")
if not hasattr(ioobj, "close"):
raise UIFindError("not a valid IO object: %r" % (ioobj,))
uiobj = _get_object(uiname)
if not hasattr(uiobj, "Print"):
raise UIFindError("not a valid UI object: %r" % (uiobj,))
if themename is not None:
themeobj = _get_object(themename)
if not issubclass(themeobj, Theme):
raise UIFindError("not a valid Theme object: %r." % (themeobj,))
return uiobj(ioobj(), theme=themeobj())
else:
return uiobj(ioobj())
def _test(argv):
ui = get_userinterface()
ui.Print("Hello world!")
inp = ui.user_input("Type something> ")
ui.Print("You typed:", inp)
return ui
if __name__ == "__main__":
ui = _test(sys.argv)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume v2 Backup action implementations"""
import copy
import logging
from cliff import command
from cliff import lister
from cliff import show
import six
from openstackclient.common import utils
class CreateBackup(show.ShowOne):
"""Create new backup"""
log = logging.getLogger(__name__ + ".CreateBackup")
def get_parser(self, prog_name):
parser = super(CreateBackup, self).get_parser(prog_name)
parser.add_argument(
"volume",
metavar="<volume>",
help="Volume to backup (name or ID)"
)
parser.add_argument(
"--name",
metavar="<name>",
required=True,
help="Name of the backup"
)
parser.add_argument(
"--description",
metavar="<description>",
help="Description of the backup"
)
parser.add_argument(
"--container",
metavar="<container>",
help="Optional backup container name"
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action: (%s)", parsed_args)
volume_client = self.app.client_manager.volume
volume_id = utils.find_resource(
volume_client.volumes, parsed_args.volume).id
backup = volume_client.backups.create(
volume_id,
container=parsed_args.container,
name=parsed_args.name,
description=parsed_args.description
)
backup._info.pop("links", None)
return zip(*sorted(six.iteritems(backup._info)))
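# Hedged CLI sketch (the exact command names depend on how these classes are
# registered in OpenStackClient's entry points; roughly):
#   openstack backup create --name nightly --description "pre-upgrade" my-volume
#   openstack backup list --long
#   openstack backup restore <backup-id> my-volume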
class DeleteBackup(command.Command):
"""Delete backup(s)"""
log = logging.getLogger(__name__ + ".DeleteBackup")
def get_parser(self, prog_name):
parser = super(DeleteBackup, self).get_parser(prog_name)
parser.add_argument(
"backups",
metavar="<backup>",
nargs="+",
help="Backup(s) to delete (name or ID)"
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action: (%s)", parsed_args)
volume_client = self.app.client_manager.volume
for backup in parsed_args.backups:
backup_id = utils.find_resource(
volume_client.backups, backup).id
volume_client.backups.delete(backup_id)
return
class ListBackup(lister.Lister):
"""List backups"""
log = logging.getLogger(__name__ + ".ListBackup")
def get_parser(self, prog_name):
parser = super(ListBackup, self).get_parser(prog_name)
parser.add_argument(
"--long",
action="store_true",
default=False,
help="List additional fields in output"
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action: (%s)", parsed_args)
def _format_volume_id(volume_id):
"""Return a volume name if available
:param volume_id: a volume ID
:rtype: either the volume ID or name
"""
volume = volume_id
if volume_id in volume_cache.keys():
volume = volume_cache[volume_id].name
return volume
if parsed_args.long:
columns = ['ID', 'Name', 'Description', 'Status', 'Size',
'Availability Zone', 'Volume ID', 'Container']
column_headers = copy.deepcopy(columns)
column_headers[6] = 'Volume'
else:
columns = ['ID', 'Name', 'Description', 'Status', 'Size']
column_headers = columns
# Cache the volume list
volume_cache = {}
try:
for s in self.app.client_manager.volume.volumes.list():
volume_cache[s.id] = s
except Exception:
# Just forget it if there's any trouble
pass
data = self.app.client_manager.volume.backups.list()
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={'Volume ID': _format_volume_id},
) for s in data))
class RestoreBackup(show.ShowOne):
"""Restore backup"""
log = logging.getLogger(__name__ + ".RestoreBackup")
def get_parser(self, prog_name):
parser = super(RestoreBackup, self).get_parser(prog_name)
parser.add_argument(
"backup",
metavar="<backup>",
help="Backup to restore (ID only)"
)
parser.add_argument(
"volume",
metavar="<volume>",
help="Volume to restore to (name or ID)"
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action: (%s)", parsed_args)
volume_client = self.app.client_manager.volume
backup = utils.find_resource(volume_client.backups, parsed_args.backup)
destination_volume = utils.find_resource(volume_client.volumes,
parsed_args.volume)
return volume_client.restores.restore(backup.id, destination_volume.id)
class ShowBackup(show.ShowOne):
"""Display backup details"""
log = logging.getLogger(__name__ + ".ShowBackup")
def get_parser(self, prog_name):
parser = super(ShowBackup, self).get_parser(prog_name)
parser.add_argument(
"backup",
metavar="<backup>",
help="Backup to display (name or ID)")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action: (%s)", parsed_args)
volume_client = self.app.client_manager.volume
backup = utils.find_resource(volume_client.backups,
parsed_args.backup)
backup._info.pop("links", None)
return zip(*sorted(six.iteritems(backup._info)))
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
from six.moves import shlex_quote
import os
import shutil
import tempfile
import unittest
from test import _common
from test import helper
import beets
class PlaylistTestHelper(helper.TestHelper):
def setUp(self):
self.setup_beets()
self.lib = beets.library.Library(':memory:')
self.music_dir = os.path.expanduser(os.path.join('~', 'Music'))
i1 = _common.item()
i1.path = beets.util.normpath(os.path.join(
self.music_dir,
'a', 'b', 'c.mp3',
))
i1.title = u'some item'
i1.album = u'some album'
self.lib.add(i1)
self.lib.add_album([i1])
i2 = _common.item()
i2.path = beets.util.normpath(os.path.join(
self.music_dir,
'd', 'e', 'f.mp3',
))
i2.title = 'another item'
i2.album = 'another album'
self.lib.add(i2)
self.lib.add_album([i2])
i3 = _common.item()
i3.path = beets.util.normpath(os.path.join(
self.music_dir,
'x', 'y', 'z.mp3',
))
i3.title = 'yet another item'
i3.album = 'yet another album'
self.lib.add(i3)
self.lib.add_album([i3])
self.playlist_dir = tempfile.mkdtemp()
self.config['directory'] = self.music_dir
self.config['playlist']['playlist_dir'] = self.playlist_dir
self.setup_test()
self.load_plugins('playlist')
def setup_test(self):
raise NotImplementedError
def tearDown(self):
self.unload_plugins()
shutil.rmtree(self.playlist_dir)
self.teardown_beets()
class PlaylistQueryTestHelper(PlaylistTestHelper):
def test_name_query_with_absolute_paths_in_playlist(self):
q = u'playlist:absolute'
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def test_path_query_with_absolute_paths_in_playlist(self):
q = u'playlist:{0}'.format(shlex_quote(os.path.join(
self.playlist_dir,
'absolute.m3u',
)))
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def test_name_query_with_relative_paths_in_playlist(self):
q = u'playlist:relative'
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def test_path_query_with_relative_paths_in_playlist(self):
q = u'playlist:{0}'.format(shlex_quote(os.path.join(
self.playlist_dir,
'relative.m3u',
)))
results = self.lib.items(q)
self.assertEqual(set([i.title for i in results]), set([
u'some item',
u'another item',
]))
def test_name_query_with_nonexisting_playlist(self):
        q = u'playlist:nonexisting'
results = self.lib.items(q)
self.assertEqual(set(results), set())
def test_path_query_with_nonexisting_playlist(self):
q = u'playlist:{0}'.format(shlex_quote(os.path.join(
            self.playlist_dir,
'nonexisting.m3u',
)))
results = self.lib.items(q)
self.assertEqual(set(results), set())
class PlaylistTestRelativeToLib(PlaylistQueryTestHelper, unittest.TestCase):
def setup_test(self):
with open(os.path.join(self.playlist_dir, 'absolute.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'd', 'e', 'f.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'nonexisting.mp3')))
with open(os.path.join(self.playlist_dir, 'relative.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join('a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join('d', 'e', 'f.mp3')))
f.write('{0}\n'.format('nonexisting.mp3'))
self.config['playlist']['relative_to'] = 'library'
class PlaylistTestRelativeToDir(PlaylistQueryTestHelper, unittest.TestCase):
def setup_test(self):
with open(os.path.join(self.playlist_dir, 'absolute.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'd', 'e', 'f.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'nonexisting.mp3')))
with open(os.path.join(self.playlist_dir, 'relative.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join('a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join('d', 'e', 'f.mp3')))
f.write('{0}\n'.format('nonexisting.mp3'))
self.config['playlist']['relative_to'] = self.music_dir
class PlaylistTestRelativeToPls(PlaylistQueryTestHelper, unittest.TestCase):
def setup_test(self):
with open(os.path.join(self.playlist_dir, 'absolute.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'd', 'e', 'f.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'nonexisting.mp3')))
with open(os.path.join(self.playlist_dir, 'relative.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.relpath(
os.path.join(self.music_dir, 'a', 'b', 'c.mp3'),
start=self.playlist_dir,
)))
f.write('{0}\n'.format(os.path.relpath(
os.path.join(self.music_dir, 'd', 'e', 'f.mp3'),
start=self.playlist_dir,
)))
f.write('{0}\n'.format(os.path.relpath(
os.path.join(self.music_dir, 'nonexisting.mp3'),
start=self.playlist_dir,
)))
self.config['playlist']['relative_to'] = 'playlist'
self.config['playlist']['playlist_dir'] = self.playlist_dir
class PlaylistUpdateTestHelper(PlaylistTestHelper):
def setup_test(self):
with open(os.path.join(self.playlist_dir, 'absolute.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'd', 'e', 'f.mp3')))
f.write('{0}\n'.format(os.path.join(
self.music_dir, 'nonexisting.mp3')))
with open(os.path.join(self.playlist_dir, 'relative.m3u'), 'w') as f:
f.write('{0}\n'.format(os.path.join('a', 'b', 'c.mp3')))
f.write('{0}\n'.format(os.path.join('d', 'e', 'f.mp3')))
f.write('{0}\n'.format('nonexisting.mp3'))
self.config['playlist']['auto'] = True
self.config['playlist']['relative_to'] = 'library'
class PlaylistTestItemMoved(PlaylistUpdateTestHelper, unittest.TestCase):
def test_item_moved(self):
# Emit item_moved event for an item that is in a playlist
results = self.lib.items(u'path:{0}'.format(shlex_quote(
os.path.join(self.music_dir, 'd', 'e', 'f.mp3'))))
item = results[0]
beets.plugins.send(
'item_moved', item=item, source=item.path,
destination=beets.util.bytestring_path(
os.path.join(self.music_dir, 'g', 'h', 'i.mp3')))
# Emit item_moved event for an item that is not in a playlist
results = self.lib.items(u'path:{0}'.format(shlex_quote(
os.path.join(self.music_dir, 'x', 'y', 'z.mp3'))))
item = results[0]
beets.plugins.send(
'item_moved', item=item, source=item.path,
destination=beets.util.bytestring_path(
os.path.join(self.music_dir, 'u', 'v', 'w.mp3')))
# Emit cli_exit event
beets.plugins.send('cli_exit', lib=self.lib)
# Check playlist with absolute paths
playlist_path = os.path.join(self.playlist_dir, 'absolute.m3u')
with open(playlist_path, 'r') as f:
lines = [line.strip() for line in f.readlines()]
self.assertEqual(lines, [
os.path.join(self.music_dir, 'a', 'b', 'c.mp3'),
os.path.join(self.music_dir, 'g', 'h', 'i.mp3'),
os.path.join(self.music_dir, 'nonexisting.mp3'),
])
# Check playlist with relative paths
playlist_path = os.path.join(self.playlist_dir, 'relative.m3u')
with open(playlist_path, 'r') as f:
lines = [line.strip() for line in f.readlines()]
self.assertEqual(lines, [
os.path.join('a', 'b', 'c.mp3'),
os.path.join('g', 'h', 'i.mp3'),
'nonexisting.mp3',
])
class PlaylistTestItemRemoved(PlaylistUpdateTestHelper, unittest.TestCase):
def test_item_removed(self):
# Emit item_removed event for an item that is in a playlist
results = self.lib.items(u'path:{0}'.format(shlex_quote(
os.path.join(self.music_dir, 'd', 'e', 'f.mp3'))))
item = results[0]
beets.plugins.send('item_removed', item=item)
# Emit item_removed event for an item that is not in a playlist
results = self.lib.items(u'path:{0}'.format(shlex_quote(
os.path.join(self.music_dir, 'x', 'y', 'z.mp3'))))
item = results[0]
beets.plugins.send('item_removed', item=item)
# Emit cli_exit event
beets.plugins.send('cli_exit', lib=self.lib)
# Check playlist with absolute paths
playlist_path = os.path.join(self.playlist_dir, 'absolute.m3u')
with open(playlist_path, 'r') as f:
lines = [line.strip() for line in f.readlines()]
self.assertEqual(lines, [
os.path.join(self.music_dir, 'a', 'b', 'c.mp3'),
os.path.join(self.music_dir, 'nonexisting.mp3'),
])
# Check playlist with relative paths
playlist_path = os.path.join(self.playlist_dir, 'relative.m3u')
with open(playlist_path, 'r') as f:
lines = [line.strip() for line in f.readlines()]
self.assertEqual(lines, [
os.path.join('a', 'b', 'c.mp3'),
'nonexisting.mp3',
])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
import math
import random
import pygame
from pygame.locals import *
import koon.app
from koon.gui import *
from koon.geo import *
from koon.res import resman
import koon.gfx as gfx
import koon.input as input
from settings import GameType, Configuration
from player import *
from sndman import SoundManager
import scenarios
import control as ctrl
import pickups
class MonorailMenu:
def __init__( self, game_data ):
self.game_data = game_data
self.screen = ScreenGameSelect( game_data )
self._is_done = False
self.should_quit = False
self.guistate = GuiState()
self.music = resman.get("game.menu_music")
self.music.play( -1 )
def do_tick( self, userinput ):
SingleSwitch.feed_keys( userinput )
SingleSwitch.check_enable( userinput )
self.screen.tick( userinput, self.guistate )
if isinstance( self.screen, ScreenGameSelect ):
if self.screen.is_done():
if self.screen.should_quit:
self.should_quit = True
self._is_done = True
elif self.game_data.is_single_player():
self.screen = ScreenLevelSelect( self.game_data )
elif self.game_data.is_single_random():
self._is_done = True
else:
self._is_done = True
# self.screen = ScreenPlayerSelect( self.game_data )
elif isinstance( self.screen, ScreenLevelSelect ):
if self.screen.is_done():
if self.screen.get_state() == ScreenLevelSelect.PLAY:
self._is_done = True
elif self.screen.get_state() == ScreenLevelSelect.MENU:
self.screen = ScreenGameSelect( self.game_data )
elif isinstance( self.screen, ScreenPlayerSelect ):
if self.screen.is_done():
self._is_done = True
if self._is_done:
self.music.fadeout( 1000 )
def show_level_select( self ):
self.screen = ScreenLevelSelect( self.game_data )
self._is_done = False
self.music.play()
def show_main_menu( self ):
self.screen = ScreenGameSelect( self.game_data )
self._is_done = False
self.music.play()
def draw( self, surface, interpol, time_sec ):
surface.fill( (0,0,0) )
self.screen.draw( surface, interpol, time_sec )
def draw_mouse( self, surface, interpol, time_sec ):
x, y = pygame.mouse.get_pos()
resman.get("gui_surf").draw( surface, Vec2D(x, y), (0,0,32,32) )
def mouse_down( self, button ):
pass
def is_done( self ):
return self._is_done
class Credit:
FONT = None
FONT_BLACK = None
def __init__( self, text ):
self.text = text
self.pos = Vec2D( 0, 0 )
self.speed = -10
if Credit.FONT is None:
Credit.FONT = Font( "data/edmunds.ttf", color=(255,255,255), size=25, use_antialias = True )
Credit.FONT_BLACK = Font( "data/edmunds.ttf", color=(0,0,0), size=25, use_antialias = True )
def tick( self ):
self.speed += 0.3
self.pos.x += 3
self.pos.y += self.speed
def is_dead( self ):
return self.pos.y > -90 and self.speed > 0
def draw( self, surface, offset_x ):
Credit.FONT_BLACK.draw( self.text, surface, (172 + int(self.pos.x), int(self.pos.y) + offset_x + 602), Font.CENTER )
Credit.FONT.draw( self.text, surface, (170 + int(self.pos.x), int(self.pos.y) + offset_x + 600), Font.CENTER )
class CarAnimation:
class RailObject:
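        # A weighted follower used by the intro car animation: each tick() the
        # offset accelerates toward parent_offset by `weight` per frame and
        # snaps onto the parent (speed reset to 0) once it reaches it. The
        # car, man and hat chain these followers, so each part lags slightly
        # behind the object it rides on.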
def __init__( self, weight ):
self.weight = weight
self.offset = 0.0
self.speed = 0.0
def tick( self, parent_offset ):
if self.offset > parent_offset:
if self.offset - 5 > parent_offset:
self.offset = parent_offset + 5
self.speed -= (self.weight * 1)
self.offset += self.speed
if self.offset <= parent_offset:
self.speed = 0.0
self.offset = parent_offset
elif self.offset < parent_offset:
self.speed += self.weight
self.offset += self.speed
if self.offset >= parent_offset:
self.speed = 0.0
self.offset = parent_offset
STATE_NORMAL, STATE_DOWN, STATE_UP, STATE_CREDITS = range(4)
CREDITS = [line for line in """-= Created by =-
Koonsolo
www.koonsolo.com
-= Programming =-
Koen Witters
-= Graphics =-
Koen Witters
-= Music =-
Jeremy Sherman
Heartland
-= Thanks to =-
Roel Guldentops
Leen Vander Kuylen
Maarten Vander Kuylen
William Van Haevre
Michael Van Loock
Nick Verhaert
Erik Wollebrants
-= Tools Used =-
Python
Pygame
Blender
The Gimp
Vim
Kubuntu
Audacity
""".splitlines()]
def __init__( self ):
self.carsprite = resman.get("game.introcar_sprite")
self.carsprite_car = resman.get("game.introcar_car")
self.carsprite_man = resman.get("game.introcar_man")
self.carsprite_hat = resman.get("game.introcar_hat")
self.carsprite_hat_front = resman.get("game.introcar_hat_front")
self.rails = CarAnimation.RailObject( 0 )
self.car = CarAnimation.RailObject( 1.5 )
self.man = CarAnimation.RailObject( 1.0 )
self.hat = CarAnimation.RailObject( 1.0 )
self.credits = []
self.credits_counter = 0
self.credits_index = 0
self.state = CarAnimation.STATE_NORMAL
def tick( self, userinput ):
self.tick_car( userinput )
# check click on hat
if userinput.mouse.went_down( input.Mouse.LEFT )\
and Rectangle(150, self.hat.offset + 150, 155, 50).contains(userinput.mouse.pos):
resman.get("gui.shot_sound").play()
self.state = CarAnimation.STATE_DOWN
if self.state == CarAnimation.STATE_CREDITS:
self.credits_counter += 1
if self.credits_counter > 25:
self.credits_counter = 0
self.credits.append( Credit( CarAnimation.CREDITS[ self.credits_index ] ) )
self.credits_index = (self.credits_index + 1) % len(CarAnimation.CREDITS)
new_credits = []
for credit in self.credits:
credit.tick()
if not credit.is_dead():
new_credits.append( credit )
self.credits = new_credits
def tick_car( self, userinput ):
self.carsprite.nr = (self.carsprite.nr + 1) % 5
if self.state == CarAnimation.STATE_DOWN:
offset = 400
if self.rails.offset >= 400:
self.state = CarAnimation.STATE_UP
elif self.state == CarAnimation.STATE_UP:
offset = -150
if self.rails.offset <= -150:
self.state = CarAnimation.STATE_CREDITS
elif self.state == CarAnimation.STATE_CREDITS:
offset = -150
self.rails.offset = -150
else:
offset = random.randint(-200, 105)
if self.rails.offset > offset:
self.rails.speed -= 4.7
else:
self.rails.speed += 4.7
        self.rails.speed = max(min(self.rails.speed, 8), -8)
self.rails.offset += self.rails.speed
self.car.tick( self.rails.offset )
self.man.tick( self.car.offset )
self.hat.tick( self.man.offset )
def draw( self, surface, interpol, time_sec ):
if self.state == CarAnimation.STATE_CREDITS:
self.draw_credits( surface, False )
self.carsprite.draw( surface, Vec2D(-150, 80 + self.rails.offset) )
if self.state == CarAnimation.STATE_NORMAL or self.state == CarAnimation.STATE_DOWN:
if self.state == CarAnimation.STATE_NORMAL:
self.carsprite_hat.draw( surface, Vec2D(-150, 80 + self.hat.offset) )
self.carsprite_man.draw( surface, Vec2D(-150, 80 + self.man.offset) )
if self.state == CarAnimation.STATE_NORMAL:
self.carsprite_hat_front.draw( surface, Vec2D(-150, 80 + self.hat.offset) )
self.carsprite_car.draw( surface, Vec2D(-150, 80 + self.car.offset) )
if self.state == CarAnimation.STATE_CREDITS:
self.draw_credits( surface, True )
def draw_credits( self, surface, is_on_top ):
for credit in self.credits:
if credit.speed >= 0 and not is_on_top:
credit.draw( surface, self.rails.offset )
if credit.speed < 0 and is_on_top:
credit.draw( surface, self.rails.offset )
class SingleSwitch:
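    # One-switch accessibility helper: when enabled, tick() scans the gui by
    # activating the next widget every scan_timeout ticks, and feed_keys()
    # maps any button press onto K_SPACE. check_enable() switches the mode on
    # after a single button has been held for 25 * 3 consecutive ticks, while
    # tick() raises esc_went_down after a 25-tick hold (presumably the
    # one-switch way to back out of a menu).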
is_enabled = False
next_cnt = 0
esc_timers = {}
esc_went_down = False
scan_timeout = 60
@staticmethod
def tick( indev, guistate ):
if SingleSwitch.is_enabled:
if indev.any_went_down():
SingleSwitch.next_cnt = 0
SingleSwitch.next_cnt += 1
if SingleSwitch.next_cnt >= SingleSwitch.scan_timeout:
SingleSwitch.next_cnt = 0
if guistate is not None:
guistate.activate_next()
# Remove non down buttons
new_timers = {}
for key, value in SingleSwitch.esc_timers.items():
if key.button in key.dev.down_buttons:
new_timers[ key ] = value
SingleSwitch.esc_timers = new_timers
# Add buttons
SingleSwitch.esc_went_down = False
for dev in indev.devs_no_mouse:
for key in dev.down_buttons:
btn = input.Button( dev, key )
if SingleSwitch.esc_timers.has_key( btn ):
SingleSwitch.esc_timers[ btn ] += 1
else:
SingleSwitch.esc_timers[ btn ] = 1
if SingleSwitch.esc_timers[ btn ] == 25:
SingleSwitch.esc_went_down = True
@staticmethod
def feed_keys( indev ):
if SingleSwitch.is_enabled:
if indev.any_went_down():
if K_SPACE not in indev.key.went_down_buttons:
indev.key.feed_down( K_SPACE )
if indev.any_went_up():
if K_SPACE not in indev.key.went_up_buttons:
indev.key.feed_up( K_SPACE )
@staticmethod
def check_enable( indev ):
if not SingleSwitch.is_enabled:
# Remove non down buttons
new_timers = {}
for key, value in SingleSwitch.esc_timers.items():
if key.button in key.dev.down_buttons:
new_timers[ key ] = value
SingleSwitch.esc_timers = new_timers
# Add buttons
SingleSwitch.esc_went_down = False
for dev in indev.devs_no_mouse:
for key in dev.down_buttons:
btn = input.Button( dev, key )
if SingleSwitch.esc_timers.has_key( btn ):
SingleSwitch.esc_timers[ btn ] += 1
else:
SingleSwitch.esc_timers[ btn ] = 1
if SingleSwitch.esc_timers[ btn ] == 25 * 3:
SingleSwitch.is_enabled = True
class ScreenGameSelect (Screen):
def __init__( self, game_data ):
super(type(self), self).__init__()
self.game_data = game_data
self._is_done = False
self.should_quit = False
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=30, use_antialias = True )
BUTTON_X = 550
BUTTON_Y = 180
H = 65
self.adventure_btn = ImageButton( copy.copy(resman.get("game.button01_sprite")), Vec2D(BUTTON_X, BUTTON_Y) )
self.adventure_btn.set_label(_("Adventure"), btnFont )
self.quick_play = ImageButton( copy.copy(resman.get("game.button01_sprite")), Vec2D(BUTTON_X, BUTTON_Y + H) )
self.quick_play.set_label( _("Quick play"), btnFont )
self.multiplayer_btn = ImageButton( copy.copy(resman.get("game.button01_sprite")), Vec2D(BUTTON_X, BUTTON_Y + 2*H) )
self.multiplayer_btn.set_label( _("Multiplayer"), btnFont )
self.options_btn = ImageButton( copy.copy(resman.get("game.button01_sprite")), Vec2D(BUTTON_X, BUTTON_Y + 3*H) )
self.options_btn.set_label( _("Options"), btnFont )
self.quit_btn = ImageButton( copy.copy(resman.get("game.button01_sprite")), Vec2D(BUTTON_X, BUTTON_Y + 4*H) )
self.quit_btn.set_label( _("Quit"), btnFont )
if Configuration.get_instance().unlocked_level == 0:
self.quick_play.is_enabled = False
self.multiplayer_btn.is_enabled = False
self.add_subcomponent( self.adventure_btn )
self.add_subcomponent( self.quick_play )
self.add_subcomponent( self.multiplayer_btn )
self.add_subcomponent( self.options_btn )
self.add_subcomponent( self.quit_btn )
self.update_neighbors()
self.dialog = None
self.background = resman.get("gui.logo_surf")
self.crate_hud = CrateHud( game_data )
self.car_animation = CarAnimation()
def tick( self, userinput, guistate ):
if self.dialog is None:
super(type(self), self).tick( userinput, guistate )
if self.adventure_btn.went_down():
Event.button()
self.game_data.set_type( GameType.SINGLE_SEQUENCE )
self._is_done = True
elif self.quick_play.went_down():
Event.button()
self.game_data.set_type( GameType.SINGLE_RANDOM )
self._is_done = True
elif self.multiplayer_btn.went_down():
Event.button()
self.dialog = ScreenPlayerSelect(self.game_data)
self.add_subcomponent( self.dialog )
self.game_data.set_type( GameType.MULTI_RANDOM )
                self.is_enabled = False
elif self.options_btn.went_down():
Event.button()
self.dialog = OptionsDialog(self.game_data)
self.add_subcomponent( self.dialog )
self.is_enabled = False
elif self.quit_btn.went_down():
Event.button()
self.should_quit = True
self._is_done = True
SingleSwitch.tick( userinput, guistate )
else:
self.dialog.tick( userinput, guistate )
if self.dialog.is_done():
if isinstance(self.dialog, ScreenPlayerSelect) and \
not self.dialog.cancelled:
self._is_done = True
self.remove_subcomponent( self.dialog )
self.dialog = None
self.is_enabled = True
self.crate_hud.tick()
self.car_animation.tick( userinput )
def draw( self, surface, interpol, time_sec ):
self.background.draw( surface, (0, 0) )
self.car_animation.draw( surface, interpol, time_sec )
self.crate_hud.draw( surface )
Screen.draw( self, surface, interpol, time_sec )
def is_done( self ):
return self._is_done
class OptionsDialog (Dialog):
def __init__( self, game_data ):
super(type(self), self).__init__( Rectangle(140, 80, 800-200, 600-200 ) )
self.background_image = resman.get("gui.paperdialog_surf")
self._is_done = False
self.game_data = game_data
self.dialog = None
self.config = Configuration.get_instance()
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
self.sound_lbl = Label( Vec2D(200, 130), _("Sound"), btnFont )
star = ImageButton( copy.copy(resman.get("gui.sheriffstar_sprite") ) )
self.sound_slider = ImageSlider( Vec2D( 320, 140 ), copy.copy(resman.get("gui.slider_sprite")), star )
self.music_lbl = Label( Vec2D(200, 195), _("Music"), btnFont )
star = ImageButton( copy.copy(resman.get("gui.sheriffstar_sprite") ) )
self.music_slider = ImageSlider( Vec2D( 320, 205 ), copy.copy(resman.get("gui.slider_sprite")), star )
self.fullscreen_btn = ImageButton( copy.copy(resman.get("game.button02_sprite")), Vec2D(300,260) )
self.update_fullscreen_label()
self.access_btn = ImageButton( copy.copy(resman.get("game.button02_sprite")), Vec2D(300,340) )
self.access_btn.set_label( _("Accessibility"), btnFont )
self.close_btn = ImageButton( copy.copy(resman.get("game.button02_sprite")), Vec2D(300,420) )
self.close_btn.set_label( _("Close"), btnFont )
self.add_subcomponent( self.sound_lbl )
self.add_subcomponent( self.sound_slider )
self.add_subcomponent( self.music_lbl )
self.add_subcomponent( self.music_slider )
self.add_subcomponent( self.fullscreen_btn )
self.add_subcomponent( self.access_btn )
self.add_subcomponent( self.close_btn )
self.sound_slider.set_value( SoundManager.get_sound_volume() )
self.music_slider.set_value( SoundManager.get_music_volume() )
self.update_neighbors()
def tick( self, userinput, guistate ):
if self.dialog is None:
Dialog.tick( self, userinput, guistate )
if self.close_btn.went_down():
Event.button()
self._is_done = True
if self.access_btn.went_down():
self.dialog = AccessDialog(self.game_data)
self.add_subcomponent( self.dialog )
self.is_enabled = False
if self.fullscreen_btn.went_down():
Event.button()
self.config.is_fullscreen = not self.config.is_fullscreen
if not self.config.is_fullscreen:
pygame.display.set_mode(self.config.resolution)
else:
pygame.display.set_mode(self.config.resolution, pygame.FULLSCREEN)
self.update_fullscreen_label()
if self.sound_slider.value_changed():
self.config.sound_volume = self.sound_slider.get_value()
SoundManager.set_sound_volume( self.config.sound_volume )
if self.sound_slider.went_up():
Event.sound_test()
if self.music_slider.value_changed():
self.config.music_volume = self.music_slider.get_value()
SoundManager.set_music_volume( self.config.music_volume )
SingleSwitch.tick( userinput, self.guistate )
else:
self.dialog.tick( userinput, guistate )
if self.dialog.is_done():
self.remove_subcomponent( self.dialog )
self.dialog = None
self.is_enabled = True
def update_fullscreen_label( self ):
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
if self.config.is_fullscreen:
self.fullscreen_btn.set_label( _("Windowed"), btnFont )
else:
self.fullscreen_btn.set_label( _("Fullscreen"), btnFont )
def is_done( self ):
self.config.save()
return self._is_done
class AccessDialog (Dialog):
def __init__( self, game_data ):
Dialog.__init__( self, Rectangle(140, 80, 800-200, 600-200 ) )
self.background_image = resman.get("gui.paperdialog_surf")
self._is_done = False
self.game_data = game_data
self.config = Configuration.get_instance()
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
# Game speed, Scroll speed, Single Button mode
self.speed0_lbl = Label( Vec2D(200, 140), _("Game"), btnFont )
self.speed1_lbl = Label( Vec2D(200, 170), _("Speed"), btnFont )
star = ImageButton( copy.copy(resman.get("gui.sheriffstar_sprite") ) )
self.speed_slider = ImageSlider( Vec2D( 320, 170 ), copy.copy(resman.get("gui.slider_sprite")), star )
self.oneswitch_btn = ImageButton( copy.copy(resman.get("game.button02_sprite")), Vec2D(300,240) )
self.update_oneswitch_label()
self.scan0_lbl = Label( Vec2D(200, 310), _("Scan"), btnFont )
self.scan1_lbl = Label( Vec2D(200, 340), _("Speed"), btnFont )
star = ImageButton( copy.copy(resman.get("gui.sheriffstar_sprite") ) )
self.scan_slider = ImageSlider( Vec2D( 320, 340 ), copy.copy(resman.get("gui.slider_sprite")), star )
self.close_btn = ImageButton( copy.copy(resman.get("game.button02_sprite")), Vec2D(300,400) )
self.close_btn.set_label( _("Close"), btnFont )
self.add_subcomponent( self.speed0_lbl )
self.add_subcomponent( self.speed1_lbl )
self.add_subcomponent( self.speed_slider )
self.add_subcomponent( self.oneswitch_btn )
self.add_subcomponent( self.scan0_lbl )
self.add_subcomponent( self.scan1_lbl )
self.add_subcomponent( self.scan_slider )
self.add_subcomponent( self.close_btn )
self.speed_slider.set_value( self.config.game_speed )
self.scan_slider.set_value( 1.0 - ((self.config.scan_speed - 20) / float(40)) )
self.update_neighbors()
def tick( self, userinput, guistate ):
Dialog.tick( self, userinput, guistate )
if self.close_btn.went_down():
Event.button()
self._is_done = True
if self.oneswitch_btn.went_down():
Event.button()
SingleSwitch.is_enabled = not SingleSwitch.is_enabled
self.config.one_switch = SingleSwitch.is_enabled
self.update_oneswitch_label()
if self.speed_slider.value_changed():
self.config.game_speed = lin_ipol(self.speed_slider.get_value(), 0.4, 1.0)
koon.app.set_game_speed(self.config.game_speed)
if self.scan_slider.value_changed():
SingleSwitch.scan_timeout = int(20 + (1.0 - self.scan_slider.get_value()) * 40)
self.config.scan_speed = SingleSwitch.scan_timeout
SingleSwitch.tick( userinput, self.guistate )
def update_oneswitch_label( self ):
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
if not SingleSwitch.is_enabled:
self.oneswitch_btn.set_label( _("One Switch"), btnFont )
else:
self.oneswitch_btn.set_label( _("Normal Mode"), btnFont )
def is_done( self ):
self.config.save()
return self._is_done
class ScreenPlayerSelect (Dialog):
def __init__( self, game_data ):
super(ScreenPlayerSelect, self).__init__( Rectangle(140, 80, 800-200, 600-200 ) )
self.background_image = resman.get("gui.paperdialog_surf")
self.game_data = game_data
self._is_done = False
self.cancelled = False
self.stage = StageHumanCount()
def tick( self, userinput, guistate ):
super(type(self), self).tick( userinput, guistate )
self.stage.tick( userinput, guistate )
if isinstance( self.stage, StageHumanCount ):
cnt = self.stage.get_player_count()
if cnt is not None:
if cnt != -1:
self.stage = StagePlayerConfig( self.game_data, cnt )
else:
self.cancelled = True
self._is_done = True
if isinstance( self.stage, StagePlayerConfig ):
if self.stage.is_done():
self._is_done = True
def draw( self, surface, interpol, time_sec ):
Dialog.draw( self, surface, interpol, time_sec )
self.stage.draw( surface, interpol, time_sec )
def is_done( self ):
return self._is_done
class StageHumanCount (Component):
def __init__( self ):
Component.__init__( self )
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
self.add_subcomponent( Label( Vec2D(200, 200), _("Number of players?"), btnFont ) )
self.buttons = []
for i in range( 2, 7 ):
self.buttons.append( Button( Rectangle(365, 180 + i * 40, 35, 35) ) )
self.buttons[-1].set_label( str(i), btnFont )
self.add_subcomponent( self.buttons[-1] )
self.guistate = GuiState()
self.update_neighbors()
self.player_count = None
def tick( self, userinput, guistate ):
self.guistate.update(userinput, self)
Component.tick( self, userinput, self.guistate )
i = 1
for btn in self.buttons:
if btn.went_down():
Event.button()
self.player_count = i+1
i += 1
if userinput.key.went_down( K_ESCAPE ) or \
userinput.joys.any_went_down( Joystick.BTN_BACK ):
self.player_count = -1
SingleSwitch.tick( userinput, self.guistate )
def get_player_count( self ):
return self.player_count
class StagePlayerConfig (Component):
def __init__( self, game_data, player_cnt ):
Component.__init__( self )
self.game_data = game_data
self.game_data.goldcars = []
self.player_cnt = player_cnt
self.current_player = 1
btnFont = Font( "data/edmunds.ttf", color=(0,0,0), size=32, use_antialias = True )
self.textLabel = Label( Vec2D(250, 300), _("Player 1, press button!"), btnFont )
self.car_sprite = resman.get("game.car1_sprite")
self.carLabel = Label( Vec2D(400, 240), image=self.car_sprite )
self.anim_timer = LoopAnimationTimer( 15, 40, 12 )
self.add_subcomponent( self.textLabel )
self.add_subcomponent( self.carLabel )
self.forbidden_buttons = [input.Button(self.game_data.userinput.key, K_ESCAPE)]
for joy in self.game_data.userinput.joys:
self.forbidden_buttons.append(input.Button(joy, Joystick.BTN_BACK))
def tick( self, userinput, guistate ):
Component.tick( self, userinput, guistate )
for dev in userinput.devs_no_mouse:
if dev.any_went_down():
Event.playerkey()
button = input.Button(dev, dev.went_down_buttons[0])
if button not in self.forbidden_buttons:
resman.get("gui.key_good_sound").play()
self.forbidden_buttons.append( button )
if self.current_player <= self.player_cnt:
player_data = [_("Player %d") % self.current_player, ctrl.HumanController( None, button )]
self.game_data.goldcars.append( player_data )
self.current_player += 1
if self.current_player <= self.player_cnt:
self.update_labels()
else:
resman.get("gui.key_bad_sound").play()
def update_labels( self ):
self.textLabel.set_text( _("Player %d, press button!") % self.current_player )
self.car_sprite = resman.get( "game.car%d_sprite" % self.current_player )
self.carLabel.set_image( self.car_sprite )
def is_done( self ):
return self.current_player > self.player_cnt
def draw( self, surface, interpol, time_sec ):
self.car_sprite.nr = self.anim_timer.get_frame( time_sec )
Component.draw( self, surface, interpol, time_sec )
class ScreenLevelSelect (Screen):
UNLOCK, LEVELS, CONGRATS, EDIT, PLAY, MENU = range(6)
def __init__( self, game_data ):
Screen.__init__( self )
self.game_data = game_data
self._is_done = False
btnFont = Font( "data/edmunds.ttf", color=(64,64,64), size=22, use_antialias = True )
self.menu_btn = ImageButton( copy.copy(resman.get("game.lvl_left_button")), Vec2D(35, 500) )
self.menu_btn.set_label( _(" Menu"), btnFont )
self.play_btn = ImageButton( copy.copy(resman.get("game.lvl_right_button")), Vec2D(800-158-35,500) )
self.play_btn.set_label( _("Play "), btnFont )
self.add_subcomponent( self.menu_btn )
self.add_subcomponent( self.play_btn )
self._init_levelpoints( self.game_data )
if self.game_data.finished_game():
self.state = ScreenLevelSelect.CONGRATS
self.unlock_timer = 0
self.fireworks = Fireworks()
elif self.game_data.can_unlock_item():
self.state = ScreenLevelSelect.UNLOCK
self.unlock_timer = 0
else:
self.state = ScreenLevelSelect.LEVELS
self.background = resman.get("gui.levelselect_surf")
self.font = pygame.font.Font("data/edmunds.ttf", 20)
self.fontL = pygame.font.Font("data/edmunds.ttf", 28)
self.scenario = self.game_data.get_quest().create_scenario(self.game_data.skill_level.value)
self.info = ScenarioInfo( self.scenario, self.game_data )
self.levelpoints[ self.game_data.get_quest().progress ].set_selected()
desc_font = Font( "data/edmunds.ttf", color=(0,0,0), size=20, use_antialias = True )
self.description_field = TextField( Rectangle(250, 450, 300, 140), desc_font )
self.description_field.is_enabled = False
self.add_subcomponent( self.description_field )
self.crate_hud = CrateHud( game_data )
self.update_neighbors()
self.init_active = True
def _init_levelpoints( self, game_data, cheat = False ):
self.lines = [[(162, 399), (127, 374), (99, 352)], [(58, 354), (49, 287), (55, 235), (64, 210), (89, 196), (123, 196)], [(156, 194), (149, 156), (133, 123), (109, 101), (81, 95), (62, 95)], [(129, 61), (160, 51), (185, 45), (218, 48), (251, 57), (278, 60)], [(319, 67), (334, 96), (348, 121), (348, 142), (342, 159), (326, 167)], [(286, 186), (289, 205), (294, 228), (312, 267)], [(324, 278), (341, 292), (367, 309), (395, 320), (422, 322), (450, 314), (484, 295), (503, 273)], [(472, 260), (497, 244), (529, 232), (552, 204), (556, 171), (532, 158)], [(472, 162), (437, 154), (406, 137), (407, 112), (418, 92), (451, 81), (494, 73), (524, 78), (701, 103), (729, 121), (739, 155), (725, 181), (702, 186), (676, 177), (650, 171), (641, 182), (640, 213), (655, 232)], [(750, 214), (733, 240), (717, 270), (692, 289), (662, 318), (641, 349), (646, 372)], [(732, 393), (696, 421), (657, 453)]]
# self.lines = [[(98, 443), (165, 372)], [(200, 384), (281, 323), (267, 295)], [(189, 315), (105, 302), (138, 264), (222, 254)], [(257, 270), (306, 216), (262, 184)], [(202, 218), (116, 205), (103, 163), (209, 146), (281, 119)], [(322, 131), (400, 110), (435, 131), (407, 159)], [(386, 155), (379, 189), (448, 219), (457, 241)], [(484, 238), (497, 269), (472, 292), (424, 295)], [(399, 291), (390, 323), (478, 337), (565, 308)], [(575, 305), (582, 270), (601, 259), (559, 245), (565, 227)], [(584, 222), (588, 200), (554, 182)], [(535, 190), (489, 157), (526, 130), (523, 113), (566, 89), (699, 84), (751, 95), (762, 115), (739, 141), (745, 159), (678, 196), (693, 230)], [(710, 238), (694, 277)], [(682, 281), (684, 308), (744, 326), (757, 370), (737, 394)], [(706, 385), (667, 449)]]
self.lines_length = 0.0
for seq in self.lines:
for i in range( 0, len(seq) - 1 ):
a = Vec2D( seq[i][0], seq[i][1] )
b = Vec2D( seq[i+1][0], seq[i+1][1] )
diff = a - b
self.lines_length += diff.length()
self.levelpoints = Radiobuttons()
total_points = game_data.quest.get_level_count()
config = Configuration.get_instance()
for i in range( total_points ):
sprite = resman.get("gui.levelpoint_sprite").clone()
levelpoint = ImageCheckbox( sprite, self._get_point_place(i, total_points) )
levelpoint.is_enabled = (i <= config.unlocked_level) or cheat
self.levelpoints.append( levelpoint )
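    # _get_point_place maps level index i onto the hand-drawn path stored in
    # self.lines: it walks the polyline segments, accumulating their lengths
    # until it reaches the fraction i/total of the total path length, then
    # linearly interpolates between the two surrounding points. The Vec2D(8,8)
    # shift presumably centres the levelpoint sprite on the path.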
def _get_point_place( self, i, total ):
pos = i * self.lines_length / total
pos_it = 0.0
prev_pos_it = 0.0
for seq in self.lines:
for i in range( 0, len(seq) - 1 ):
a = Vec2D( seq[i][0], seq[i][1] )
b = Vec2D( seq[i+1][0], seq[i+1][1] )
diff = b - a
pos_it += diff.length()
if pos_it >= pos:
interpol = (pos - prev_pos_it) / (pos_it - prev_pos_it)
diff *= interpol
return a + diff - Vec2D(8,8)
prev_pos_it = pos_it
assert False
def tick( self, userinput, guistate ):
if self.state == ScreenLevelSelect.LEVELS:
if self.init_active:
guistate.set_active( self.play_btn )
self.init_active = False
super(type(self), self).tick( userinput, guistate )
self.levelpoints.tick( userinput, guistate )
if self.levelpoints.get_selected_index() is not None and \
self.game_data.get_quest().progress != self.levelpoints.get_selected_index():
self.game_data.get_quest().to_level( self.levelpoints.get_selected_index() )
self.game_data.save_single_player_progress()
self.scenario = self.game_data.get_quest().create_scenario(self.game_data.skill_level.value)
self.info = ScenarioInfo( self.scenario, self.game_data )
action_button = self.game_data.goldcars[0][1].action_button
if self.menu_btn.went_down():
Event.button()
self.state = ScreenLevelSelect.MENU
self._is_done = True
elif self.play_btn.went_down()\
or action_button.went_down():
Event.button()
self.state = ScreenLevelSelect.PLAY
self._is_done = True
# Cheat code for unlocking all levels
elif userinput.key.is_down(K_F5) and userinput.key.is_down(K_F8) and userinput.key.went_down(K_PAGEDOWN):
self._init_levelpoints(self.game_data, True)
self.description_field.text = self.scenario.description
SingleSwitch.tick( userinput, guistate )
elif self.state == ScreenLevelSelect.UNLOCK:
if self.unlock_timer == 5:
self.crate_hud.start_unlock()
resman.get("gui.unlock_sound").play(2)
elif self.unlock_timer == 20*2:
self.crate_hud.stop_unlock()
self.game_data.unlock_item()
elif self.unlock_timer == 20*5:
self.state = ScreenLevelSelect.LEVELS
self.crate_hud.tick()
self.unlock_timer += 1
elif self.state == ScreenLevelSelect.CONGRATS:
self.fireworks.tick()
self.unlock_timer += 1
if self.unlock_timer > 25*5 and \
(userinput.key.any_went_down() or userinput.mouse.any_went_down()):
self.state = ScreenLevelSelect.LEVELS
else:
if userinput.mouse.went_down( Mouse.LEFT ):
self.lines[-1].append( (userinput.mouse.pos.x,
userinput.mouse.pos.y) )
print self.lines[-1][-1]
if userinput.key.went_down( K_n ):
print "new line"
self.lines.append([])
elif userinput.key.went_down( K_p ):
print "self.lines =", self.lines
def draw( self, surface, interpol, time_sec ):
self.background.draw( surface, (0,0) )
Screen.draw( self, surface, interpol, time_sec )
self.levelpoints.draw( surface, interpol, time_sec )
center = Vec2D( surface.get_width()/2, surface.get_height()/2 )
if self.state == ScreenLevelSelect.LEVELS:
self.info.draw_title( surface, time_sec, (center.x, 410) )
self.info.draw_pickup( surface, time_sec, (center.x + 30, 600-60 ) )
self.info.draw_opponents( surface, time_sec, (center.x - 120 + 35, 600-60+17 ) )
# draw skill level
txt = self.font.render( _("skill: %(name)s (%(value).0f)") % {"name":self.game_data.skill_level.name, "value":self.game_data.skill_level.value*100}, True, (255,255,255) )
surface.blit( txt, (240, 340) )
elif self.state == ScreenLevelSelect.UNLOCK:
y = 410
txt = self.fontL.render( _("Unlocking item"), True, (0,0,0) )
surface.blit( txt, (center.x - txt.get_width()/2, y) )
self.crate_hud.draw( surface )
# draw skill level
txt = self.font.render( _("skill: %(name)s (%(value).0f)") % {"name":self.game_data.skill_level.name, "value":self.game_data.skill_level.value*100}, True, (255,255,255) )
surface.blit( txt, (240, 340) )
elif self.state == ScreenLevelSelect.CONGRATS:
self.fireworks.draw( surface, interpol, time_sec )
## # draw help lines
## for seq in self.lines:
## for i in range( 0, len(seq) - 1 ):
## a = ( seq[i][0], seq[i][1] )
## b = ( seq[i+1][0], seq[i+1][1] )
##
## pygame.draw.line( surface, (255,0,0), a, b )
def is_done( self ):
return self._is_done
def get_state( self ):
return self.state
class Fireworks:
GRAVITY = Vec2D( 0, 1 )
sprite = None
class Spot:
def __init__( self, color, pos, speed, damping, life_count, sub_color ):
if color >= 0:
self.color = color
else:
self.color = random.randint(0, 6)
self.pos = pos
self.speed = speed
self.damping = damping
self.life_count = life_count
self.sub_color = sub_color
if Fireworks.sprite is None:
Fireworks.sprite = resman.get("gui.fireworks_sprite").clone()
def tick( self ):
self.speed = self.speed * self.damping + Fireworks.GRAVITY
self.pos = self.pos + self.speed
self.life_count -= 1
def is_alive( self ):
return self.life_count > 0
def explode( self ):
if self.sub_color is None:
return []
else:
Event.fireworks_explode()
list = []
for i in range(0,100):
speed = Vec2D( random.uniform(-15, 15), random.uniform(-15, 15) )
if speed.length() > 0:
speed = speed.normalize() * random.uniform(1,15)
if self.sub_color >= 0:
color = self.sub_color
else:
color = random.randint(0,6)
list.append( Fireworks.Spot( color, self.pos, speed, 0.9, random.randint(5, 12), None ) )
return list
def draw( self, surface, interpol ):
Fireworks.sprite.nr = self.color
pos = self.pos + (self.speed * interpol)
Fireworks.sprite.draw( surface, pos )
def __init__( self ):
self.spots = []
self.dark_surf = gfx.Surface( (800, 600) )
self.dark_surf.pysurf.fill( (0, 0, 0, 160) )
self.dark_surf.pysurf.set_alpha( 160 )
self.congrats_surf = gfx.Surface("data/gfx/congrats.png")
def tick( self ):
if len( self.spots ) == 0:
Event.fireworks_start()
for i in range(0, random.randint(1, 6) ):
pos = Vec2D( random.randint( 200, 600 ), random.randint( 300, 500 ) )
speed = Vec2D( random.uniform(-2, 2 ), random.uniform( -20, -30 ) )
color = random.randint(-1, 5)
self.spots.append( Fireworks.Spot( color, pos, speed, 1.0, random.randint(12, 25), color ) )
new_spots = []
for spot in self.spots:
spot.tick()
if spot.is_alive():
new_spots.append( spot )
else:
new_spots.extend( spot.explode() )
self.spots = new_spots
def draw( self, surface, interpol, time_sec ):
self.dark_surf.draw( surface, (0,0) )
self.congrats_surf.draw( surface, (70,350) )
for spot in self.spots:
spot.draw( surface, interpol )
class ScenarioInfo:
def __init__( self, scenario, game_data = None ):
self.scenario = scenario
self.game_data = game_data
self.font = gfx.Font("data/edmunds.ttf", 20, (0,0,0), True)
self.title_font = gfx.Font("data/edmunds.ttf", 28, (0,0,0), True)
self.init_pickup_surf()
self.init_title_sprites()
def init_pickup_surf( self ):
self.pickup_surf = None
self.pickup_y = 30
if len(self.scenario.pickups) > 0:
pickup = self.scenario.pickups[0]
if pickup is pickups.Dynamite:
self.pickup_surf = resman.get("game.dynamite_sprite").clone()
elif pickup is Torch:
self.pickup_surf = resman.get("game.torch_sprite").clone()
self.pickup_y = 10
elif pickup is Key:
self.pickup_surf = resman.get("game.key_sprite").clone()
self.pickup_y = 10
elif pickup is Oiler:
self.pickup_surf = resman.get("game.oiler_sprite").clone()
elif pickup is Balloon:
self.pickup_surf = resman.get("game.balloon_sprite").clone()
self.pickup_y = 15
elif pickup is Ghost:
self.pickup_surf = resman.get("game.ghost_sprite").clone()
self.pickup_y = 0
def init_title_sprites( self ):
self.title_sprite_left = None
self.title_sprite_right = None
self.title_sprite_left_y = 0
self.title_sprite_right_y = 0
if isinstance( self.scenario, scenarios.ScenarioCoinCollect ):
self.title_sprite_left = resman.get("game.copper_sprite").clone()
self.title_sprite_right = resman.get("game.copper_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 25, 0, self.title_sprite_left.max_x )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, self.title_sprite_left.max_x )
self.title_sprite_left_y = 23
self.title_sprite_right_y = 23
elif isinstance( self.scenario, scenarios.ScenarioHoldLamp ):
self.title_sprite_left = resman.get("game.lamp_sprite").clone()
self.title_sprite_right = resman.get("game.lamp_sprite").clone()
self.left_anim_timer = None
self.right_anim_timer = None
self.title_sprite_left_y = 33
self.title_sprite_right_y = 33
elif isinstance( self.scenario, scenarios.ScenarioCutter ):
self.title_sprite_left = resman.get("game.axe_sprite").clone()
self.title_sprite_right = resman.get("game.gold_sprite").clone()
self.left_anim_timer = gfx.PingPongTimer( 25, 0, 8 )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, 15 )
self.title_sprite_left_y = 26
self.title_sprite_right_y = 33
elif isinstance( self.scenario, scenarios.ScenarioBlowup ):
self.title_sprite_left = resman.get("game.dynamite_sprite").clone()
self.title_sprite_right = resman.get("game.dynamite_sprite").clone()
self.left_anim_timer = None
self.right_anim_timer = None
self.title_sprite_left_y = 35
self.title_sprite_right_y = 35
elif isinstance( self.scenario, scenarios.ScenarioRace ):
self.title_sprite_left = resman.get("game.flag1_sprite").clone()
self.title_sprite_right = resman.get("game.flag2_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 20, 0, 8 )
self.right_anim_timer = gfx.LoopAnimationTimer( 20, 0, 8 )
self.title_sprite_left_y = 7
self.title_sprite_right_y = 7
elif isinstance( self.scenario, scenarios.ScenarioCollectRocks ):
self.title_sprite_left = resman.get("game.rock_sprite").clone()
self.title_sprite_right = resman.get("game.rock_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 25, 0, 15 )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, 15 )
self.title_sprite_left_y = 39
self.title_sprite_right_y = 39
elif isinstance( self.scenario, scenarios.ScenarioDiamondCollect ):
self.title_sprite_left = resman.get("game.diamond_sprite").clone()
self.title_sprite_right = resman.get("game.diamond_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 25, 0, 4 )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, 4 )
self.title_sprite_left_y = 27
self.title_sprite_right_y = 27
elif isinstance( self.scenario, scenarios.ScenarioCollectAll ):
self.title_sprite_left = resman.get("game.copper_sprite").clone()
self.title_sprite_right = resman.get("game.diamond_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 25, 0, self.title_sprite_left.max_x )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, 4 )
self.title_sprite_left_y = 23
self.title_sprite_right_y = 27
elif isinstance( self.scenario, scenarios.ScenarioPacman ):
self.title_sprite_left = resman.get("game.copper_sprite").clone()
self.title_sprite_right = resman.get("game.copper_sprite").clone()
self.left_anim_timer = gfx.LoopAnimationTimer( 25, 0, self.title_sprite_left.max_x )
self.right_anim_timer = gfx.LoopAnimationTimer( 25, 0, self.title_sprite_left.max_x )
self.title_sprite_left_y = 23
self.title_sprite_right_y = 23
def draw_pickup( self, surface, time_sec, pos ):
if self.pickup_surf is not None:
self.font.draw( _("pickup:"), surface, pos )
self.pickup_surf.draw( surface, Vec2D(pos[0], pos[1] ) + Vec2D(90, self.pickup_y) )
def draw_title( self, surface, time_sec, pos ):
self.title_font.draw( self.scenario.title, surface, pos, gfx.Font.CENTER )
width = self.title_font.get_width( self.scenario.title )
left_pos = Vec2D( pos[0] - width/2 - 25, pos[1] + self.title_sprite_left_y )
right_pos = Vec2D( pos[0] + width/2 + 25, pos[1] + self.title_sprite_right_y )
if self.title_sprite_left is not None:
if self.left_anim_timer is not None:
self.title_sprite_left.nr = self.left_anim_timer.get_frame( time_sec )
self.title_sprite_left.draw( surface, left_pos )
if self.title_sprite_right is not None:
if self.right_anim_timer is not None:
self.title_sprite_right.nr = self.right_anim_timer.get_frame( time_sec )
self.title_sprite_right.draw( surface, right_pos )
def draw_opponents( self, surface, time_sec, pos ):
opponent_count = len(self.game_data.get_quest().get_opponent_iqs())
pos = Vec2D(pos[0], pos[1]) - Vec2D(35, 17) * ((opponent_count-1)/2)
for i in range(0, opponent_count):
offset = Vec2D(i*35, i*17)
sprite = copy.copy(resman.get("game.car%d_sprite" % (i+2)))
sprite.nr = 0
sprite.draw( surface, pos + offset )
class CrateHud:
"""Crates at the bottom of the window"""
def __init__( self, game_data ):
self.game_data = game_data
self.crate_sprite = resman.get("game.crate_sprite")
self.crate_label = resman.get("game.crate_label_surf")
self.unlock_timer = 0
self.teaser_timer = -20
self.teaser_cnt = -1
self.teaser_dir = 1
def tick( self ):
if self.unlock_timer > 0:
self.unlock_timer += 1
self.teaser_timer = -200
self.teaser_cnt = -1
self.teaser_dir = 1
else:
self.teaser_timer += 1
if self.teaser_timer > 3:
if self.teaser_cnt == -1 and self.teaser_dir == -1:
self.teaser_timer = -100
self.teaser_dir = 1
else:
self.teaser_timer = 0
self.teaser_cnt += self.teaser_dir
if self.teaser_cnt == 10:
self.teaser_dir = -1
def start_unlock( self ):
self.unlock_timer = 1
def stop_unlock( self ):
self.unlock_timer = 0
def draw( self, surface ):
for i in range(0,11):
if (i < self.game_data.unlocked_item_count) \
or \
(i == self.game_data.unlocked_item_count and \
(self.unlock_timer % 4 > 1)) \
or \
(i == self.teaser_cnt):
self.crate_sprite.nr = i+1
else:
self.crate_sprite.nr = 0
# Quick Hack because dynamite and diamond are swapped
if self.crate_sprite.nr == 1:
self.crate_sprite.nr = 2
elif self.crate_sprite.nr == 2:
self.crate_sprite.nr = 1
self.crate_sprite.draw( surface, Vec2D(15 + i*73, 600-66) )
self.crate_label.draw( surface, Vec2D(0, 600-100) )
font = Font( "data/edmunds.ttf", color=(0,0,0), size=20, use_antialias = True )
font.draw(_("Secret Items:"), surface, (18, 600-97))
|
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core classes for firewall policy enforcement.
Simplifies the interface with the compute API for managing firewall policies.
"""
import hashlib
import json
import operator
import socket
import ssl
import http.client
import httplib2
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import rule_validator as rv
# The name of the GCE API.
API_NAME = 'compute'
# The root of the GCE API.
API_ROOT = 'https://www.googleapis.com/'
# The version of the GCE API to use.
API_VERSION = 'v1'
LOGGER = logger.logging
# What transient exceptions should be retried.
RETRY_EXCEPTIONS = (http.client.ResponseNotReady, http.client.IncompleteRead,
httplib2.ServerNotFoundError, socket.error, ssl.SSLError,)
# Maximum time to allow an active API operation to wait for status=Done
OPERATION_TIMEOUT = 120.0
# The number of times to retry an operation if it times out before completion.
OPERATION_RETRY_COUNT = 5
class Error(Exception):
"""Base error class for the module."""
class InvalidFirewallRuleError(Error):
"""Raised if a firewall rule doesn't look like a firewall rule should."""
class FirewallRuleValidationError(Error):
"""Raised if a firewall rule fails validation."""
class DuplicateFirewallRuleNameError(Error):
"""Raised if a rule name is reused in a policy, names must be unique."""
class FirewallEnforcementFailedError(Error):
"""Updating firewall for project failed."""
class FirewallEnforcementInsertFailedError(FirewallEnforcementFailedError):
"""Insertion of a firewall rule failed."""
class FirewallEnforcementUpdateFailedError(FirewallEnforcementFailedError):
"""Update of a firewall rule failed."""
class FirewallEnforcementDeleteFailedError(FirewallEnforcementFailedError):
"""Deletion of a firewall rule failed."""
class NetworkImpactValidationError(FirewallEnforcementFailedError):
"""Raised if a firewall rule is to be applied to a disallowed network."""
class EmptyProposedFirewallRuleSetError(FirewallEnforcementFailedError):
"""Raised if the proposed firewall rule set is empty."""
class FirewallQuotaExceededError(FirewallEnforcementFailedError):
"""Raised if the proposed changes would exceed firewall quota."""
def get_network_name_from_url(network_url):
"""Given a network URL, return the name of the network.
Args:
network_url (str): The fully qualified network url, such as
('<root>/compute/v1/projects/my-proj/global/networks/my-network')
Returns:
str: The network name, my-network in the previous example
"""
return network_url.split('/')[-1]
def build_network_url(project, network):
"""Render the network url from project and network names.
Args:
project (str): The name of the GCE project to operate upon.
network (str): The name of the network to operate upon.
Returns:
str: The fully qualified network url for the given project/network.
"""
return (u'%(root)s%(api_name)s/%(version)s/projects/%(project)s/global/'
'networks/%(network)s') % {'api_name': API_NAME,
'network': network,
'project': project,
'root': API_ROOT,
'version': API_VERSION}
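# For example, with the API constants defined above,
# build_network_url('my-proj', 'my-network') renders:
#   https://www.googleapis.com/compute/v1/projects/my-proj/global/networks/my-network
# and get_network_name_from_url() recovers 'my-network' from that URL.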
def _is_successful(operation):
"""Checks if the operation finished with no errors.
If the operation response contains an 'error' key, then the error code
is checked. Any error code that is not ignored causes this to return
False.
Args:
operation (dict): A Compute GlobalOperations response object from an API
call.
Returns:
bool: True if there were no errors, or all errors are ignored, otherwise
False.
"""
success = True
if 'error' in operation:
        # 'error' should always contain an 'errors' list:
if 'errors' in operation['error']:
for err in operation['error']['errors']:
# We ignore the following errors:
# RESOURCE_ALREADY_EXISTS: Because another program somewhere
# else could have already added the rule.
# INVALID_FIELD_VALUE: Because the network probably
# disappeared out from under us.
if err.get('code') in ['RESOURCE_ALREADY_EXISTS',
'INVALID_FIELD_VALUE']:
LOGGER.warning('Ignoring error: %s', err)
else:
LOGGER.error('Operation has error: %s', err)
success = False
else:
LOGGER.error('Unknown error response: %s', operation['error'])
success = False
return success
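# Sketch of the behaviour above, using hypothetical operation payloads:
#   _is_successful({'status': 'DONE'})                               -> True
#   _is_successful({'error': {'errors': [
#       {'code': 'RESOURCE_ALREADY_EXISTS'}]}})                      -> True (ignored)
#   _is_successful({'error': {'errors': [{'code': 'NOT_FOUND'}]}})   -> False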
def filter_rules_by_network(rules, network):
"""Returns the subset of rules that apply to the specified network(s).
Args:
rules (list): A list of rule dicts to filter.
network (str): The network name to restrict rules to. If no network
specified then all rules are returned.
Returns:
list: A list of rules that apply to the filtered networks.
"""
if not network:
return rules
filtered_rules = []
for rule in rules:
if get_network_name_from_url(rule['network']) == network:
filtered_rules.append(rule)
return filtered_rules
def _rule_update_can_patch(rule_from, rule_to):
    """Returns True if the change from rule_from to rule_to can be patched.

    Switching a rule between 'allowed' and 'denied' cannot be expressed as a
    patch and requires a full replace; both arguments must be non-empty dicts.
    """
    if not rule_to or not rule_from:
        raise ValueError(
            'from and to must both exist for checking replace vs patch.')
    if 'allowed' in rule_from and 'denied' in rule_to:
        return False  # Patch fails to update allowed -> denied
    if 'denied' in rule_from and 'allowed' in rule_to:
        return False  # Patch fails to update denied -> allowed
    return True
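# For instance (hypothetical rules), switching a rule from 'allowed' to
# 'denied' cannot be patched and needs a full replace:
#   _rule_update_can_patch({'name': 'r', 'allowed': [{'IPProtocol': 'tcp'}]},
#                          {'name': 'r', 'denied': [{'IPProtocol': 'tcp'}]})
#   -> False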
class FirewallRules(object):
"""A collection of validated firewall rules."""
DEFAULT_PRIORITY = 1000
DEFAULT_DIRECTION = 'INGRESS'
DEFAULT_DISABLED = False
DEFAULT_LOGCONFIG = {'enable': False}
def __init__(self, project, rules=None, add_rule_callback=None):
"""Constructor.
Args:
project (str): The GCE project id the rules apply to.
rules (list): A list of rule dicts to add to the object.
add_rule_callback (function): A callback function that checks
whether a firewall rule should be applied. If the callback
returns False, that rule will not be modified.
Raises:
DuplicateFirewallRuleNameError: Two or more rules have the same
name.
InvalidFirewallRuleError: One or more rules failed validation.
"""
self._project = project
self.rules = {}
self._add_rule_callback = add_rule_callback
if rules:
self.add_rules(rules)
def __eq__(self, other):
"""Equality.
Args:
other (FirewallRules): The other object to compare with.
Returns:
bool: True if equal, else false.
"""
return self.rules == other.rules
def __ne__(self, other):
"""Not Equal.
Args:
other (FirewallRules): The other object to compare with.
Returns:
bool: True if not equal, else false.
"""
return self.rules != other.rules
def add_rules_from_api(self, compute_client):
"""Loads rules from compute.firewalls().list().
Args:
compute_client (object): A ComputeClient instance for interfacing with
GCE API.
Raises:
DuplicateFirewallRuleNameError: Two or more rules have the same
name.
InvalidFirewallRuleError: One or more rules failed validation.
"""
if self.rules:
LOGGER.warning(
'Can not import rules from the API into a FirewallRules '
'object with rules already added')
return
firewall_rules = compute_client.get_firewall_rules(self._project)
for rule in firewall_rules:
# Only include keys in the ALLOWED_RULE_ITEMS set.
scrubbed_rule = dict(
[(k, v) for k, v in list(rule.items()) if k
in rv.ALLOWED_RULE_ITEMS])
self.add_rule(scrubbed_rule)
def add_rules(self, rules, network_name=None):
"""Adds rules from a list of rule dicts.
Args:
rules (list): A list of rule dicts to add to the object
network_name (str): If set, rules which have no network currently
defined will have their network set to network_name, and
network_name will be prepended to the rule name.
Rules that do have a network defined have their network matched
against network_name, and if they differ the rule is not added.
Raises:
DuplicateFirewallRuleNameError: Two or more rules have the same
name.
InvalidFirewallRuleError: One or more rules failed validation.
"""
for rule in rules:
self.add_rule(rule, network_name=network_name)
def add_rule(self, rule, network_name=None):
"""Adds rule to the self.rules dictionary.
Args:
rule (dict): A valid dict representing a GCE firewall rule
network_name (str): If set, rules which have no network currently
defined will have their network set to network_name, and
network_name will be prepended to the rule name.
Rules that do have a network defined have their network matched
against network_name, and if they differ the rule is not added.
Raises:
DuplicateFirewallRuleNameError: Two or more rules have the same
name.
InvalidFirewallRuleError: One or more rules failed validation.
"""
# pylint: disable=too-many-branches
if not isinstance(rule, dict):
raise InvalidFirewallRuleError(
'Invalid rule type. Found %s expected dict' % type(rule))
new_rule = self._order_lists_in_rule(rule)
if network_name:
if 'network' in new_rule:
rule_network = get_network_name_from_url(new_rule['network'])
if rule_network != network_name:
                    # Don't add the rule if its network does not match
# network_name
LOGGER.info('Firewall rule does not apply to network %s, '
'skipping: %s', network_name,
json.dumps(new_rule, sort_keys=True))
return
# Normalize network value to full network URL for rules that
# already include a network name, so comparison with a rule
# from the API will return True
new_rule['network'] = build_network_url(self._project,
network_name)
else:
new_rule['network'] = build_network_url(self._project,
network_name)
            # Update the rule name by prepending the network, so it is
            # unique. If the new rule does not have a name defined it will
            # fail the rule validation below and an InvalidFirewallRuleError
            # exception will be raised.
if 'name' in new_rule:
                # Truncate network name if too long. This may result in
                # duplicate rule names, which will cause the network name
                # to be changed to an md5 hash representation.
new_name = '%s-%s' % (
network_name[:(62 - len(new_rule['name']))],
new_rule['name'])
while new_name in self.rules:
# Firewall rule names must start with [a-z], hashes
# could start with a number, so we prepend hn-
# (hashed network) to the name.
network_name = 'hn-' + hashlib.md5(
network_name.encode()).hexdigest()
new_name = '%s-%s' % (
network_name[:(62 - len(new_rule['name']))],
new_rule['name'])
new_rule['name'] = new_name
if 'priority' not in new_rule:
new_rule['priority'] = self.DEFAULT_PRIORITY
if 'direction' not in new_rule:
new_rule['direction'] = self.DEFAULT_DIRECTION
if 'logConfig' not in new_rule:
new_rule['logConfig'] = self.DEFAULT_LOGCONFIG
if 'disabled' not in new_rule:
new_rule['disabled'] = self.DEFAULT_DISABLED
error = rv.validate_gcp_rule(new_rule)
if error:
raise InvalidFirewallRuleError(error)
if rule['name'] in self.rules:
raise DuplicateFirewallRuleNameError(
'Rule %s already defined in rules: %s' % (
rule['name'], ', '.join(sorted(self.rules.keys()))))
callback_ok = (
self._add_rule_callback(new_rule)
if self._add_rule_callback else True)
if callback_ok:
self.rules[new_rule['name']] = new_rule
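    # As a concrete illustration (hypothetical values): adding a rule named
    # 'allow-ssh' with network_name='default' to a FirewallRules('p') instance
    # stores it under the key 'default-allow-ssh', with its 'network' field
    # set to the full URL returned by build_network_url('p', 'default').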
def filtered_by_networks(self, networks):
"""Returns the subset of rules that apply to the specified network(s).
Args:
networks (list): A list of one or more network names to fetch rules
for.
Returns:
dict: A dictionary of rules that apply to the filtered networks.
"""
if not networks:
return self.rules
filtered_rules = {}
for rule_name, rule in list(self.rules.items()):
if get_network_name_from_url(rule['network']) in networks:
filtered_rules[rule_name] = rule
return filtered_rules
def as_json(self):
"""Export rules to a json string.
The JSON string should be an array of Firewall resource objects, see
https://cloud.google.com/compute/docs/reference/latest/firewalls
for details. Only the fields in ALLOWED_RULE_ITEMS are permitted.
Returns:
str: A JSON string with an array of rules sorted by network and
name.
"""
rules = sorted(
list(self.rules.values()), key=operator.itemgetter('network',
'name'))
return json.dumps(rules, sort_keys=True)
def add_rules_from_json(self, json_rules):
"""Import rules from a json string as exported by as_json.
The JSON string should be an array of Firewall resource objects, see
https://cloud.google.com/compute/docs/reference/latest/firewalls
for details. Only the fields in ALLOWED_RULE_ITEMS are permitted.
The legacy format from older versions of GCE Enforcer is also supported.
This format wraps the array of Firewall resources in a dictionary under
the key 'items'.
Args:
json_rules (str): The JSON formatted string containing the rules to
import.
Raises:
DuplicateFirewallRuleNameError: Two or more rules have the same
name.
InvalidFirewallRuleError: One or more rules failed validation.
"""
if self.rules:
LOGGER.warning('Can not import from JSON into a FirewallRules '
'object with rules already added')
return
rules = json.loads(json_rules)
if isinstance(rules, list):
for rule in rules:
self.add_rule(rule)
elif isinstance(rules, dict):
if 'items' in rules:
for item in rules['items']:
rule = dict(
[(key, item[key]) for key in rv.ALLOWED_RULE_ITEMS
if key in item])
self.add_rule(rule)
def _order_lists_in_rule(self, unsorted_rule):
"""Recursively iterates a rule dictionary and sorts any lists.
        This ensures that two rules with the same policies, but with unordered
        lists, will compare equal when tested.
Args:
unsorted_rule (dict): A rule dictionary that has not been sorted.
Returns:
dict: A new rule dictionary with the lists sorted
"""
sorted_rule = {}
value_key = None
for key, value in list(unsorted_rule.items()):
if isinstance(value, list):
if value and isinstance(value[0], dict): # List of dictionaries
for i, entry in enumerate(value):
value[i] = self._order_lists_in_rule(entry)
value_key = list(value[0].keys())[0]
value = sorted(value, key=lambda k: str(k[value_key]))
sorted_rule[key] = sorted(value, key=sorted)
elif isinstance(value, dict):
sorted_rule[key] = self._order_lists_in_rule(value)
else:
sorted_rule[key] = value
return sorted_rule
# pylint: disable=too-many-instance-attributes
# TODO: Investigate improving so we can avoid the pylint disable.
class FirewallEnforcer(object):
"""Enforce a set of firewall rules for use with GCE projects."""
def __init__(self,
project,
compute_client,
expected_rules,
current_rules=None,
project_sema=None,
operation_sema=None,
add_rule_callback=None):
"""Constructor.
Args:
project (str): The id of the cloud project to enforce the firewall
on.
compute_client (object): A ComputeClient instance for interfacing
with GCE API.
expected_rules (object): A FirewallRules object with the expected
rules to be enforced on the project.
current_rules (object): A FirewallRules object with the current
rules for the project. If not defined, the API will be queried
and the existing rules imported into current_rules when
apply_firewall is called for the project.
project_sema (object): An optional semaphore object, used to limit
the number of concurrent projects getting written to.
operation_sema (object): [DEPRECATED] An optional semaphore object,
used to limit the number of concurrent write operations on
project firewalls.
add_rule_callback (function): A callback function that checks
whether a firewall rule should be applied. If the callback
returns False, that rule will not be modified.
"""
self.project = project
self.compute_client = compute_client
self.expected_rules = expected_rules
if current_rules:
self.current_rules = current_rules
else:
self.current_rules = None
self.project_sema = project_sema
if operation_sema:
LOGGER.warning(
'Operation semaphore is deprecated. Argument ignored.')
self.operation_sema = None
self._add_rule_callback = add_rule_callback
# Initialize private parameters
self._rules_to_delete = []
self._rules_to_insert = []
self._rules_to_update = []
self._deleted_rules = []
self._inserted_rules = []
self._updated_rules = []
def apply_firewall(self,
prechange_callback=None,
networks=None,
allow_empty_ruleset=False):
"""Enforce the expected firewall rules on the project.
Args:
prechange_callback (function): An optional callback function that
will get called if the firewall policy for a project does not
match the expected policy, before any changes are actually
applied. If the callback returns False then no changes will be
made to the project. If it returns True then the changes will be
pushed. If prechange_callback is set to None then the callback
will be skipped and enforcement will continue as though it had
returned True.
The callback template is callback_func(project,
rules_to_delete,
rules_to_insert,
rules_to_update)
The callback may be used to limit the kinds of firewall changes
that are allowed to be pushed for a project, limit the number of
rules that can get changed, to check if the project should have
rules changed, etc.
The callback may also raise FirewallEnforcementFailedError if it
determines that the set of changes to the policy could result in
an outage for an underlying service, or otherwise are
inconsistent with business rules. This will cause the
enforcement to fail.
networks (list): A list of networks to limit rule changes to. Rules
on networks not in the list will not be changed.
Note- This can lead to duplicate rule name collisions since all
rules are not included when building the change set. The
change set will be validated before getting enforced and
any errors will cause a FirewallEnforcementFailedError
exception to be raised.
            allow_empty_ruleset (bool): If set to True and expected_rules has
                no rules, all current firewall rules will be deleted from the
                project.
Returns:
int: The total number of firewall rules deleted, inserted and
updated.
Raises:
            EmptyProposedFirewallRuleSetError: Raised if expected_rules
                contains no rules and allow_empty_ruleset is not set to True.
            FirewallEnforcementFailedError: An error occurred while updating
                the firewall. The calling code should validate the current
                state of the project firewall, and potentially revert to the
                old firewall rules.
                Any rules changed before the error occurred can be retrieved
                by calling the Get(Deleted|Inserted|Updated)Rules methods.
"""
# Reset change sets to empty lists
self._rules_to_delete = []
self._rules_to_insert = []
self._rules_to_update = []
if not self.current_rules:
self.refresh_current_rules()
if not self.expected_rules.rules and not allow_empty_ruleset:
raise EmptyProposedFirewallRuleSetError(
'No rules defined in the expected rules.')
if (self.current_rules.filtered_by_networks(networks) ==
self.expected_rules.filtered_by_networks(networks)):
LOGGER.info(
'Current and expected rules match for project %s.',
self.project)
return 0
self._build_change_set(networks)
self._validate_change_set(networks)
if prechange_callback:
if not prechange_callback(self.project, self._rules_to_delete,
self._rules_to_insert,
self._rules_to_update):
LOGGER.warning(
'The Prechange Callback returned False for project %s, '
'changes will not be applied.', self.project)
return 0
if self.project_sema:
self.project_sema.acquire()
try:
delete_before_insert = self._check_change_operation_order(
len(self._rules_to_insert), len(self._rules_to_delete))
changed_count = 0
if not networks:
networks = [None] # Default to all networks
for network in networks:
changed_count += self._apply_change_set(
delete_before_insert, network)
finally:
if self.project_sema:
self.project_sema.release()
return changed_count
def fetch_rules_to_change(self):
"""Fetches the rules that are supposed to be changed.
Returns:
list: Rules to delete.
list: Rules to insert.
list: Rules to update.
"""
return (self._rules_to_delete, self._rules_to_insert,
self._rules_to_update)
def refresh_current_rules(self):
"""Updates the current rules for the project using the compute API."""
current_rules = FirewallRules(self.project,
add_rule_callback=self._add_rule_callback)
current_rules.add_rules_from_api(self.compute_client)
self.current_rules = current_rules
def get_deleted_rules(self):
"""Returns the list of deleted rules.
Returns:
list: The list of deleted rules.
"""
return self._deleted_rules
def get_inserted_rules(self):
"""Returns the list of inserted rules.
Returns:
list: The list of inserted rules.
"""
return self._inserted_rules
def get_updated_rules(self):
"""Returns the list of updated rules.
Returns:
list: The list of updated rules.
"""
return self._updated_rules
def _build_change_set(self, networks=None):
"""Enumerate changes between the current and expected firewall rules.
Args:
networks (list): The network names to restrict rules to. If no
networks specified then all rules are returned.
"""
if networks:
# Build new firewall rules objects from the subset of rules for
# networks
current_rules = self.current_rules.filtered_by_networks(networks)
expected_rules = self.expected_rules.filtered_by_networks(networks)
else:
current_rules = self.current_rules.rules
expected_rules = self.expected_rules.rules
for rule_name in current_rules:
if rule_name not in expected_rules:
self._rules_to_delete.append(rule_name)
for rule_name in expected_rules:
if rule_name not in current_rules:
self._rules_to_insert.append(rule_name)
for rule_name in expected_rules:
if rule_name in current_rules:
if expected_rules[rule_name] != current_rules[rule_name]:
self._rules_to_update.append(rule_name)
def _validate_change_set(self, networks=None):
"""Validate the changeset will not leave the project in a bad state.
Args:
networks (list): The network names to restrict rules to. If no
networks specified then all rules are returned.
Raises:
FirewallRuleValidationError: Raised if a rule name to be inserted
already exists on the project.
NetworkImpactValidationError: Raised if a rule to be changed exists
on a network not in the networks list.
"""
for rule_name in self._rules_to_insert:
if (rule_name in self.current_rules.rules and
rule_name not in self._rules_to_delete):
raise FirewallRuleValidationError(
'The rule %s is in the rules to insert set, but the same '
'rule name already exists on project %s. It may be used on '
'a different network.' % (rule_name, self.project))
if networks:
for rule_name in self._rules_to_update:
impacted_network = get_network_name_from_url(
self.current_rules.rules[rule_name]['network'])
if impacted_network not in networks:
raise NetworkImpactValidationError(
'The rule %s is in the rules to update set, but it is '
'currently on a network, "%s", that is not in the '
'allowed networks list for project %s: "%s". Updating '
'the rule to %s would impact the wrong network.' %
(rule_name, impacted_network, self.project,
', '.join(networks),
self.expected_rules.rules[rule_name]))
def _check_change_operation_order(self, insert_count, delete_count):
"""Check if enough quota to do the firewall changes insert first.
If current usage is near the limit, check if deleting current rules
before adding the new rules would allow the project to stay below quota.
Args:
insert_count (int): The number of rules that will be inserted.
delete_count (int): The number of rules that will be deleted.
Returns:
bool: True if existing rules should be deleted before new rules are
inserted, otherwise false.
Raises:
FirewallQuotaExceededError: Raised if there is not enough quota for
the required policy to be applied.
"""
delete_before_insert = False
try:
firewall_quota = self.compute_client.get_firewall_quota(
self.project)
except KeyError as e:
LOGGER.error('Error getting quota for project %s, %s',
self.project,
e)
firewall_quota = None
if firewall_quota:
usage = firewall_quota.get('usage', 0)
limit = firewall_quota.get('limit', 0)
if usage + insert_count > limit:
if usage - delete_count + insert_count > limit:
raise FirewallQuotaExceededError(
                        'Firewall enforcement cannot update the policy for '
                        'project %s without exceeding the current firewall '
                        'quota: %u.' % (self.project, limit))
else:
LOGGER.info('Switching to "delete first" rule update order '
'for project %s.', self.project)
delete_before_insert = True
else:
LOGGER.warning('Unknown firewall quota, switching to "delete '
'first" rule update order for project %s.',
self.project)
delete_before_insert = True
return delete_before_insert
def _apply_change_set(self, delete_before_insert, network):
"""Updates project firewall rules based on the generated changeset.
Extends self._(deleted|inserted|updated)_rules with the rules changed by
these operations.
Args:
delete_before_insert (bool): If true, delete operations are
completed before inserts. Otherwise insert operations are
completed first.
network (str): The network to limit rule changes to. Rules on
                other networks will not be changed. If not set, then all rules
                in the change set are applied.
Returns:
int: The total number of firewall rules deleted, inserted and
updated.
"""
change_count = 0
if delete_before_insert:
change_count += self._delete_rules(network)
change_count += self._insert_rules(network)
else:
change_count += self._insert_rules(network)
change_count += self._delete_rules(network)
change_count += self._update_rules(network)
return change_count
def _insert_rules(self, network):
"""Insert new rules into the project firewall.
Args:
network (str): The network name to restrict rules to. If no network
specified then all new rules are inserted.
Returns:
int: The count of rules inserted.
Raises:
FirewallEnforcementInsertFailedError: Raised if one or more changes
fails.
"""
change_count = 0
if self._rules_to_insert:
LOGGER.info('Inserting rules: %s', ', '.join(self._rules_to_insert))
rules = filter_rules_by_network([
self.expected_rules.rules[rule_name]
for rule_name in self._rules_to_insert
], network)
insert_function = self.compute_client.insert_firewall_rule
(successes, failures, change_errors) = self._apply_change(
insert_function, rules)
self._inserted_rules.extend(successes)
change_count += len(successes)
if failures:
raise FirewallEnforcementInsertFailedError(
'Firewall enforcement failed while inserting rules for '
'project {}. The following errors were encountered: {}'
.format(self.project, change_errors))
return change_count
def _delete_rules(self, network):
"""Delete old rules from the project firewall.
Args:
network (str): The network name to restrict rules to. If no network
specified then all unexpected rules are deleted.
Returns:
int: The count of rules deleted.
Raises:
FirewallEnforcementDeleteFailedError: Raised if one or more changes
fails.
"""
change_count = 0
if self._rules_to_delete:
LOGGER.info('Deleting rules: %s', ', '.join(self._rules_to_delete))
rules = filter_rules_by_network([
self.current_rules.rules[rule_name]
for rule_name in self._rules_to_delete
], network)
delete_function = self.compute_client.delete_firewall_rule
(successes, failures, change_errors) = self._apply_change(
delete_function, rules)
self._deleted_rules.extend(successes)
change_count += len(successes)
if failures:
raise FirewallEnforcementDeleteFailedError(
'Firewall enforcement failed while deleting rules for '
'project {}. The following errors were encountered: {}'
.format(self.project, change_errors))
return change_count
def _update_rules(self, network):
"""Update existing rules in the project firewall using patch.
Args:
network (str): The network name to restrict rules to. If no network
specified then all changed rules are updated.
Returns:
int: The count of rules updated.
Raises:
FirewallEnforcementUpdateFailedError: Raised if one or more changes
fails.
"""
change_count = 0
if self._rules_to_update:
LOGGER.info('Updating rules: %s', ', '.join(self._rules_to_update))
rules = filter_rules_by_network([
self.expected_rules.rules[rule_name]
for rule_name in self._rules_to_update
], network)
rules_to_patch = []
rules_to_replace = []
for rule in rules:
if _rule_update_can_patch(self.current_rules.rules[rule['name']], rule):
rules_to_patch.append(rule)
else:
rules_to_replace.append(rule)
if rules_to_patch:
change_count += self._patch_rules(rules_to_patch)
if rules_to_replace:
change_count += self._replace_rules(rules_to_replace)
return change_count
def _patch_rules(self, rules):
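        """Patch existing firewall rules in place.
        Args:
            rules (list): The rules to patch.
        Returns:
            int: The count of rules successfully patched.
        Raises:
            FirewallEnforcementUpdateFailedError: Raised if one or more
                changes fails.
        """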
LOGGER.info('Patching rules: %s', ', '.join(rule['name'] for rule in rules))
patch_function = self.compute_client.patch_firewall_rule
(successes, failures, change_errors) = self._apply_change(
patch_function, rules)
self._updated_rules.extend(successes)
if failures:
raise FirewallEnforcementUpdateFailedError(
                'Firewall enforcement failed while patching rules for '
'project {}. The following errors were encountered: {}'
.format(self.project, change_errors))
return len(successes)
def _replace_rules(self, rules):
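        """Replace existing firewall rules that cannot be patched.
        Args:
            rules (list): The rules to replace.
        Returns:
            int: The count of rules successfully replaced.
        Raises:
            FirewallEnforcementUpdateFailedError: Raised if one or more
                changes fails.
        """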
LOGGER.info('Replacing rules: %s', ', '.join(rule['name'] for rule in rules))
patch_function = self.compute_client.replace_firewall_rule
(successes, failures, change_errors) = self._apply_change(
patch_function, rules)
self._updated_rules.extend(successes)
if failures:
raise FirewallEnforcementUpdateFailedError(
                'Firewall enforcement failed while replacing rules for '
'project {}. The following errors were encountered: {}'
.format(self.project, change_errors))
return len(successes)
def _apply_change(self, firewall_function, rules):
"""Modify the firewall using the passed in function and rules.
        Note: self.operation_sema is deprecated and is always None, so changes
        are applied sequentially without additional concurrency limiting.
Args:
firewall_function (function): The delete|insert|update function to
call for this set of rules
rules (list): A list of rules to pass to the firewall_function.
        Returns:
            tuple: A tuple of (successfully changed rules, rules that failed,
                error messages for the failed changes).
"""
applied_rules = []
failed_rules = []
change_errors = []
if not rules:
return applied_rules, failed_rules, change_errors
for rule in rules:
try:
response = firewall_function(self.project,
rule,
blocking=True,
retry_count=OPERATION_RETRY_COUNT,
timeout=OPERATION_TIMEOUT)
except (api_errors.ApiNotEnabledError,
api_errors.ApiExecutionError) as e:
LOGGER.exception(
'Error changing firewall rule %s for project %s: %s',
rule.get('name', ''), self.project, e)
error_str = 'Rule: %s\nError: %s' % (rule.get('name', ''), e)
change_errors.append(error_str)
failed_rules.append(rule)
continue
except api_errors.OperationTimeoutError as e:
LOGGER.exception(
'Timeout changing firewall rule %s for project %s: %s',
rule.get('name', ''), self.project, e)
error_str = 'Rule: %s\nError: %s' % (rule.get('name', ''), e)
change_errors.append(error_str)
failed_rules.append(rule)
continue
if _is_successful(response):
applied_rules.append(rule)
else:
failed_rules.append(rule)
return applied_rules, failed_rules, change_errors
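# The sketch below is illustrative only and is not part of the enforcer
# module itself. It shows one way the prechange_callback described in
# FirewallEnforcer.apply_firewall might be written: the callback receives the
# project id and the three change lists and returns True to allow the changes
# to be pushed. The 50-rule deletion threshold is an arbitrary, hypothetical
# safety limit.
def example_prechange_callback(project, rules_to_delete, rules_to_insert,
                               rules_to_update):
    """Example callback that refuses to delete more than 50 rules at once."""
    if len(rules_to_delete) > 50:
        LOGGER.warning('Refusing to delete %d rules from project %s.',
                       len(rules_to_delete), project)
        return False
    return True
# Hypothetical usage, with compute_client and expected_rules construction
# omitted:
#   enforcer = FirewallEnforcer('my-project', compute_client, expected_rules)
#   changed = enforcer.apply_firewall(
#       prechange_callback=example_prechange_callback)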
|
|
"""ONVIF device abstraction."""
import asyncio
import datetime as dt
import os
from typing import List
from httpx import RequestError
import onvif
from onvif import ONVIFCamera
from onvif.exceptions import ONVIFError
from zeep.exceptions import Fault
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from .const import (
ABSOLUTE_MOVE,
CONTINUOUS_MOVE,
GOTOPRESET_MOVE,
LOGGER,
PAN_FACTOR,
RELATIVE_MOVE,
TILT_FACTOR,
ZOOM_FACTOR,
)
from .event import EventManager
from .models import PTZ, Capabilities, DeviceInfo, Profile, Resolution, Video
class ONVIFDevice:
"""Manages an ONVIF device."""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry = None):
"""Initialize the device."""
self.hass: HomeAssistant = hass
self.config_entry: ConfigEntry = config_entry
self.available: bool = True
self.device: ONVIFCamera = None
self.events: EventManager = None
self.info: DeviceInfo = DeviceInfo()
self.capabilities: Capabilities = Capabilities()
self.profiles: List[Profile] = []
self.max_resolution: int = 0
self._dt_diff_seconds: int = 0
@property
def name(self) -> str:
"""Return the name of this device."""
return self.config_entry.data[CONF_NAME]
@property
def host(self) -> str:
"""Return the host of this device."""
return self.config_entry.data[CONF_HOST]
@property
def port(self) -> int:
"""Return the port of this device."""
return self.config_entry.data[CONF_PORT]
@property
    def username(self) -> str:
"""Return the username of this device."""
return self.config_entry.data[CONF_USERNAME]
@property
    def password(self) -> str:
"""Return the password of this device."""
return self.config_entry.data[CONF_PASSWORD]
async def async_setup(self) -> bool:
"""Set up the device."""
self.device = get_device(
self.hass,
host=self.config_entry.data[CONF_HOST],
port=self.config_entry.data[CONF_PORT],
username=self.config_entry.data[CONF_USERNAME],
password=self.config_entry.data[CONF_PASSWORD],
)
# Get all device info
try:
await self.device.update_xaddrs()
await self.async_check_date_and_time()
# Create event manager
self.events = EventManager(
self.hass, self.device, self.config_entry.unique_id
)
# Fetch basic device info and capabilities
self.info = await self.async_get_device_info()
self.capabilities = await self.async_get_capabilities()
self.profiles = await self.async_get_profiles()
# No camera profiles to add
if not self.profiles:
return False
if self.capabilities.ptz:
self.device.create_ptz_service()
# Determine max resolution from profiles
self.max_resolution = max(
profile.video.resolution.width
for profile in self.profiles
if profile.video.encoding == "H264"
)
except RequestError as err:
LOGGER.warning(
"Couldn't connect to camera '%s', but will retry later. Error: %s",
self.name,
err,
)
self.available = False
except Fault as err:
LOGGER.error(
"Couldn't connect to camera '%s', please verify "
"that the credentials are correct. Error: %s",
self.name,
err,
)
return False
return True
async def async_stop(self, event=None):
"""Shut it all down."""
if self.events:
await self.events.async_stop()
await self.device.close()
async def async_check_date_and_time(self) -> None:
"""Warns if device and system date not synced."""
LOGGER.debug("Setting up the ONVIF device management service")
device_mgmt = self.device.create_devicemgmt_service()
LOGGER.debug("Retrieving current device date/time")
try:
system_date = dt_util.utcnow()
device_time = await device_mgmt.GetSystemDateAndTime()
if not device_time:
LOGGER.debug(
"""Couldn't get device '%s' date/time.
GetSystemDateAndTime() return null/empty""",
self.name,
)
return
if device_time.UTCDateTime:
tzone = dt_util.UTC
cdate = device_time.UTCDateTime
else:
tzone = (
dt_util.get_time_zone(device_time.TimeZone)
or dt_util.DEFAULT_TIME_ZONE
)
cdate = device_time.LocalDateTime
if cdate is None:
LOGGER.warning("Could not retrieve date/time on this camera")
else:
cam_date = dt.datetime(
cdate.Date.Year,
cdate.Date.Month,
cdate.Date.Day,
cdate.Time.Hour,
cdate.Time.Minute,
cdate.Time.Second,
0,
tzone,
)
cam_date_utc = cam_date.astimezone(dt_util.UTC)
LOGGER.debug(
"Device date/time: %s | System date/time: %s",
cam_date_utc,
system_date,
)
dt_diff = cam_date - system_date
self._dt_diff_seconds = dt_diff.total_seconds()
if self._dt_diff_seconds > 5:
LOGGER.warning(
"The date/time on the device (UTC) is '%s', "
"which is different from the system '%s', "
"this could lead to authentication issues",
cam_date_utc,
system_date,
)
except RequestError as err:
LOGGER.warning(
"Couldn't get device '%s' date/time. Error: %s", self.name, err
)
async def async_get_device_info(self) -> DeviceInfo:
"""Obtain information about this device."""
device_mgmt = self.device.create_devicemgmt_service()
device_info = await device_mgmt.GetDeviceInformation()
# Grab the last MAC address for backwards compatibility
mac = None
try:
network_interfaces = await device_mgmt.GetNetworkInterfaces()
for interface in network_interfaces:
if interface.Enabled:
mac = interface.Info.HwAddress
except Fault as fault:
if "not implemented" not in fault.message:
raise fault
LOGGER.debug(
"Couldn't get network interfaces from ONVIF device '%s'. Error: %s",
self.name,
fault,
)
return DeviceInfo(
device_info.Manufacturer,
device_info.Model,
device_info.FirmwareVersion,
device_info.SerialNumber,
mac,
)
async def async_get_capabilities(self):
"""Obtain information about the available services on the device."""
snapshot = False
try:
media_service = self.device.create_media_service()
media_capabilities = await media_service.GetServiceCapabilities()
snapshot = media_capabilities and media_capabilities.SnapshotUri
except (ONVIFError, Fault, RequestError):
pass
pullpoint = False
try:
pullpoint = await self.events.async_start()
except (ONVIFError, Fault):
pass
ptz = False
try:
self.device.get_definition("ptz")
ptz = True
except ONVIFError:
pass
return Capabilities(snapshot, pullpoint, ptz)
async def async_get_profiles(self) -> List[Profile]:
"""Obtain media profiles for this device."""
media_service = self.device.create_media_service()
result = await media_service.GetProfiles()
profiles = []
if not isinstance(result, list):
return profiles
for key, onvif_profile in enumerate(result):
# Only add H264 profiles
if (
not onvif_profile.VideoEncoderConfiguration
or onvif_profile.VideoEncoderConfiguration.Encoding != "H264"
):
continue
profile = Profile(
key,
onvif_profile.token,
onvif_profile.Name,
Video(
onvif_profile.VideoEncoderConfiguration.Encoding,
Resolution(
onvif_profile.VideoEncoderConfiguration.Resolution.Width,
onvif_profile.VideoEncoderConfiguration.Resolution.Height,
),
),
)
# Configure PTZ options
if self.capabilities.ptz and onvif_profile.PTZConfiguration:
profile.ptz = PTZ(
onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace
is not None,
onvif_profile.PTZConfiguration.DefaultRelativePanTiltTranslationSpace
is not None,
onvif_profile.PTZConfiguration.DefaultAbsolutePantTiltPositionSpace
is not None,
)
try:
ptz_service = self.device.create_ptz_service()
presets = await ptz_service.GetPresets(profile.token)
profile.ptz.presets = [preset.token for preset in presets if preset]
except (Fault, RequestError):
# It's OK if Presets aren't supported
profile.ptz.presets = []
profiles.append(profile)
return profiles
async def async_get_stream_uri(self, profile: Profile) -> str:
"""Get the stream URI for a specified profile."""
media_service = self.device.create_media_service()
req = media_service.create_type("GetStreamUri")
req.ProfileToken = profile.token
req.StreamSetup = {
"Stream": "RTP-Unicast",
"Transport": {"Protocol": "RTSP"},
}
result = await media_service.GetStreamUri(req)
return result.Uri
async def async_perform_ptz(
self,
profile: Profile,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan=None,
tilt=None,
zoom=None,
):
"""Perform a PTZ action on the camera."""
if not self.capabilities.ptz:
LOGGER.warning("PTZ actions are not supported on device '%s'", self.name)
return
ptz_service = self.device.create_ptz_service()
pan_val = distance * PAN_FACTOR.get(pan, 0)
tilt_val = distance * TILT_FACTOR.get(tilt, 0)
zoom_val = distance * ZOOM_FACTOR.get(zoom, 0)
speed_val = speed
preset_val = preset
LOGGER.debug(
"Calling %s PTZ | Pan = %4.2f | Tilt = %4.2f | Zoom = %4.2f | Speed = %4.2f | Preset = %s",
move_mode,
pan_val,
tilt_val,
zoom_val,
speed_val,
preset_val,
)
try:
req = ptz_service.create_type(move_mode)
req.ProfileToken = profile.token
if move_mode == CONTINUOUS_MOVE:
# Guard against unsupported operation
if not profile.ptz.continuous:
LOGGER.warning(
"ContinuousMove not supported on device '%s'", self.name
)
return
req.Velocity = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
await ptz_service.ContinuousMove(req)
await asyncio.sleep(continuous_duration)
req = ptz_service.create_type("Stop")
req.ProfileToken = profile.token
await ptz_service.Stop(
{"ProfileToken": req.ProfileToken, "PanTilt": True, "Zoom": False}
)
elif move_mode == RELATIVE_MOVE:
# Guard against unsupported operation
if not profile.ptz.relative:
LOGGER.warning(
"RelativeMove not supported on device '%s'", self.name
)
return
req.Translation = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.RelativeMove(req)
elif move_mode == ABSOLUTE_MOVE:
# Guard against unsupported operation
if not profile.ptz.absolute:
LOGGER.warning(
"AbsoluteMove not supported on device '%s'", self.name
)
return
req.Position = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.AbsoluteMove(req)
elif move_mode == GOTOPRESET_MOVE:
# Guard against unsupported operation
if preset_val not in profile.ptz.presets:
LOGGER.warning(
"PTZ preset '%s' does not exist on device '%s'. Available Presets: %s",
preset_val,
self.name,
", ".join(profile.ptz.presets),
)
return
req.PresetToken = preset_val
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.GotoPreset(req)
except ONVIFError as err:
if "Bad Request" in err.reason:
LOGGER.warning("Device '%s' doesn't support PTZ.", self.name)
else:
LOGGER.error("Error trying to perform PTZ action: %s", err)
def get_device(hass, host, port, username, password) -> ONVIFCamera:
"""Get ONVIFCamera instance."""
return ONVIFCamera(
host,
port,
username,
password,
f"{os.path.dirname(onvif.__file__)}/wsdl/",
no_cache=True,
)
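# Illustrative sketch only; it is not used by the integration. It shows how
# the ONVIFDevice abstraction above might be exercised from a coroutine,
# assuming "entry" is an already-loaded ConfigEntry for an ONVIF camera and
# that "RIGHT" is a valid PAN_FACTOR key. Error handling is omitted.
async def example_setup_and_ptz(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Set up an ONVIFDevice and issue a small relative pan, if supported."""
    device = ONVIFDevice(hass, entry)
    if not await device.async_setup():
        return
    profile = device.profiles[0]
    await device.async_perform_ptz(
        profile,
        distance=0.1,
        speed=0.5,
        move_mode=RELATIVE_MOVE,
        continuous_duration=0,
        preset=None,
        pan="RIGHT",
    )
    await device.async_stop()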
|
|
# coding=utf-8
from django import template
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from ..models import FriendRequest, SocialGroup, GroupMembershipRequest
register = template.Library()
#--------------------------------------FOLLOWER TAGS---------------------------------------------
@register.filter
def followed_by(user1, user2):
"""
Returns whether user1 is followed by user2 or not.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user1.followed_by(user2)
@register.filter
def is_follower_of(user1, user2):
"""
Returns whether user1 follows user2 or not.
    :param user1: A User instance.
    :param user2: A User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user2.followed_by(user1)
@register.filter
def followers_count(user):
"""
Returns user followers count
    :param user: A User instance.
"""
if not user or user.is_anonymous():
return 0
return user.followers()
@register.filter
def followed_count(user):
"""
    Returns the count of how many users the user is following.
    :param user: A User instance.
"""
if not user or user.is_anonymous():
return 0
return user.following()
#--------------------------------------FRIENDSHIP TAGS-------------------------------------------
def process_user_param(user):
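    """Coerce a User instance or primary key into a User, or return False."""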
if not user:
return False
if isinstance(user, User):
return user
else:
        try:
            return User.objects.get(pk=int(user))
        except (TypeError, ValueError, User.DoesNotExist):
            return False
@register.filter
def is_friends_with(user1, user2):
"""
Returns whether user1 and user2 are friends or not.
    :param user1: A User instance.
    :param user2: A User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user1.friend_of(user2)
@register.filter
def has_requested_friendship_to(user1, user2):
"""
Returns True if user1 has requested friendship to user2, False otherwise.
    :param user1: A User instance.
    :param user2: A User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous() or user1 == user2:
return False
return FriendRequest.objects.filter(from_user=user1, to_user=user2, accepted=False).exists()
@register.filter
def friends_count(user):
"""
    Returns how many users have a "friendship" relationship with the given user.
    :param user: A User instance.
"""
user_obj = process_user_param(user)
if not user_obj:
return 0
return user_obj.friends()
#--------------------------------------GROUPS TAGS-------------------------------------------
def process_group_param(group):
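    """Coerce a SocialGroup instance or primary key into a SocialGroup, or return False."""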
if not group:
return False
if isinstance(group, SocialGroup):
return group
else:
        try:
            return SocialGroup.objects.get(pk=int(group))
        except (TypeError, ValueError, SocialGroup.DoesNotExist):
            return False
role_dict = {
'creator': _(u"Creator"),
'admin': _(u"Administrator"),
'member': _(u"Member")
}
@register.filter
def relationship_with(group, user):
"""
    Returns the relationship between the group and the passed user.
    :param group: A SocialGroup instance.
    :param user: A User instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return None
return role_dict[group_obj.relationship_with(user_obj)[1]]
@register.assignment_tag
def has_creator(group, user):
"""
Returns True if user is the creator, False otherwise
    :param user: A User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.creator == user_obj
@register.assignment_tag
def has_admin(group, user):
"""
Returns True if user is in the group list of administrators or is the creator, False otherwise
    :param user: A User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.has_admin(user_obj)
@register.assignment_tag
def is_group_member(user, group):
"""
    Returns True if user is a member of the group, False otherwise.
    :param user: A User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.has_member(user_obj)
@register.assignment_tag
def has_requested_membership(user, group):
"""
    Returns True if user has requested membership in group, False otherwise.
    :param user: A User instance.
    :param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return GroupMembershipRequest.objects.filter(
requester=user_obj,
group=group_obj,
accepted=False,
denied=False
).exists()
@register.simple_tag(takes_context=True)
def render_user_rol(context, user):
return role_dict[context['roles'][user.pk]]
@register.filter
def groups_count(user):
"""
Returns the total count of how many groups the user is a member of
"""
user_obj = process_user_param(user)
if not user_obj:
return 0
return user_obj.social_groups()
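# Illustrative template usage only (not part of this module). Assuming this
# tag library is loadable under the hypothetical name "social_tags", the
# filters and assignment tags above could be used roughly like this:
#
#   {% load social_tags %}
#   {% if request.user|is_friends_with:profile_user %}You are friends{% endif %}
#   {{ profile_user|followers_count }} followers
#   {% is_group_member request.user group as is_member %}
#   {% if is_member %}Welcome back{% endif %}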
|
|
from typing import Any, Dict, Optional
import pytz
from django.conf import settings
from django.contrib.auth import authenticate, update_session_auth_hash
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.utils.html import escape
from django.utils.safestring import SafeString
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from confirmation.models import (
Confirmation,
ConfirmationKeyException,
get_object_from_key,
render_confirmation_key_error,
)
from zerver.decorator import human_users_only
from zerver.lib.actions import (
check_change_full_name,
do_change_avatar_fields,
do_change_password,
do_change_user_delivery_email,
do_change_user_setting,
do_regenerate_api_key,
do_start_email_change_process,
get_available_notification_sounds,
)
from zerver.lib.avatar import avatar_url
from zerver.lib.email_validation import (
get_realm_email_validator,
validate_email_is_valid,
validate_email_not_already_in_realm,
)
from zerver.lib.exceptions import JsonableError, RateLimited, UserDeactivatedError
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.rate_limiter import RateLimitedUser
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_int, check_int_in, check_string_in
from zerver.models import UserProfile, avatar_changes_disabled, name_changes_disabled
from zerver.views.auth import redirect_to_deactivation_notice
from zproject.backends import check_password_strength, email_belongs_to_ldap
AVATAR_CHANGES_DISABLED_ERROR = gettext_lazy("Avatar changes are disabled in this organization.")
def confirm_email_change(request: HttpRequest, confirmation_key: str) -> HttpResponse:
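    """Finish an email change: confirm the key, switch the delivery email,
    and notify the old address."""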
try:
email_change_object = get_object_from_key(confirmation_key, [Confirmation.EMAIL_CHANGE])
except ConfirmationKeyException as exception:
return render_confirmation_key_error(request, exception)
new_email = email_change_object.new_email
old_email = email_change_object.old_email
user_profile = email_change_object.user_profile
if user_profile.realm.deactivated:
return redirect_to_deactivation_notice()
if not user_profile.is_active:
# TODO: Make this into a user-facing error, not JSON
raise UserDeactivatedError()
if user_profile.realm.email_changes_disabled and not user_profile.is_realm_admin:
raise JsonableError(_("Email address changes are disabled in this organization."))
do_change_user_delivery_email(user_profile, new_email)
context = {"realm_name": user_profile.realm.name, "new_email": new_email}
language = user_profile.default_language
send_email(
"zerver/emails/notify_change_in_email",
to_emails=[old_email],
from_name=FromAddress.security_email_from_name(user_profile=user_profile),
from_address=FromAddress.SUPPORT,
language=language,
context=context,
realm=user_profile.realm,
)
ctx = {
"new_email_html_tag": SafeString(
f'<a href="mailto:{escape(new_email)}">{escape(new_email)}</a>'
),
"old_email_html_tag": SafeString(
f'<a href="mailto:{escape(old_email)}">{escape(old_email)}</a>'
),
}
return render(request, "confirmation/confirm_email_change.html", context=ctx)
emojiset_choices = {emojiset["key"] for emojiset in UserProfile.emojiset_choices()}
default_view_options = ["recent_topics", "all_messages"]
def check_settings_values(
notification_sound: Optional[str],
email_notifications_batching_period_seconds: Optional[int],
default_language: Optional[str] = None,
) -> None:
# We can't use REQ for this widget because
# get_available_language_codes requires provisioning to be
# complete.
if default_language is not None and default_language not in get_available_language_codes():
raise JsonableError(_("Invalid default_language"))
if (
notification_sound is not None
and notification_sound not in get_available_notification_sounds()
and notification_sound != "none"
):
raise JsonableError(_("Invalid notification sound '{}'").format(notification_sound))
if email_notifications_batching_period_seconds is not None and (
email_notifications_batching_period_seconds <= 0
or email_notifications_batching_period_seconds > 7 * 24 * 60 * 60
):
# We set a limit of one week for the batching period
raise JsonableError(
_("Invalid email batching period: {} seconds").format(
email_notifications_batching_period_seconds
)
)
@human_users_only
@has_request_variables
def json_change_settings(
request: HttpRequest,
user_profile: UserProfile,
full_name: str = REQ(default=""),
email: str = REQ(default=""),
old_password: str = REQ(default=""),
new_password: str = REQ(default=""),
twenty_four_hour_time: Optional[bool] = REQ(json_validator=check_bool, default=None),
dense_mode: Optional[bool] = REQ(json_validator=check_bool, default=None),
starred_message_counts: Optional[bool] = REQ(json_validator=check_bool, default=None),
fluid_layout_width: Optional[bool] = REQ(json_validator=check_bool, default=None),
high_contrast_mode: Optional[bool] = REQ(json_validator=check_bool, default=None),
color_scheme: Optional[int] = REQ(
json_validator=check_int_in(UserProfile.COLOR_SCHEME_CHOICES), default=None
),
translate_emoticons: Optional[bool] = REQ(json_validator=check_bool, default=None),
default_language: Optional[str] = REQ(default=None),
default_view: Optional[str] = REQ(
str_validator=check_string_in(default_view_options), default=None
),
escape_navigates_to_default_view: Optional[bool] = REQ(json_validator=check_bool, default=None),
left_side_userlist: Optional[bool] = REQ(json_validator=check_bool, default=None),
emojiset: Optional[str] = REQ(str_validator=check_string_in(emojiset_choices), default=None),
demote_inactive_streams: Optional[int] = REQ(
json_validator=check_int_in(UserProfile.DEMOTE_STREAMS_CHOICES), default=None
),
timezone: Optional[str] = REQ(
str_validator=check_string_in(pytz.all_timezones_set), default=None
),
email_notifications_batching_period_seconds: Optional[int] = REQ(
json_validator=check_int, default=None
),
enable_drafts_synchronization: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_stream_desktop_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
enable_stream_email_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
enable_stream_push_notifications: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_stream_audible_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
wildcard_mentions_notify: Optional[bool] = REQ(json_validator=check_bool, default=None),
notification_sound: Optional[str] = REQ(default=None),
enable_desktop_notifications: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_sounds: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_offline_email_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
enable_offline_push_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
enable_online_push_notifications: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_digest_emails: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_login_emails: Optional[bool] = REQ(json_validator=check_bool, default=None),
enable_marketing_emails: Optional[bool] = REQ(json_validator=check_bool, default=None),
message_content_in_email_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
pm_content_in_desktop_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
desktop_icon_count_display: Optional[int] = REQ(
json_validator=check_int_in(UserProfile.DESKTOP_ICON_COUNT_DISPLAY_CHOICES), default=None
),
realm_name_in_notifications: Optional[bool] = REQ(json_validator=check_bool, default=None),
presence_enabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
enter_sends: Optional[bool] = REQ(json_validator=check_bool, default=None),
send_private_typing_notifications: Optional[bool] = REQ(
json_validator=check_bool, default=None
),
send_stream_typing_notifications: Optional[bool] = REQ(json_validator=check_bool, default=None),
send_read_receipts: Optional[bool] = REQ(json_validator=check_bool, default=None),
) -> HttpResponse:
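    """Update the requesting user's settings from the given request parameters.
    Handles password changes, email change requests, full name changes, and
    any per-user settings listed in UserProfile.property_types.
    """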
if (
default_language is not None
or notification_sound is not None
or email_notifications_batching_period_seconds is not None
):
check_settings_values(
notification_sound, email_notifications_batching_period_seconds, default_language
)
if new_password != "":
return_data: Dict[str, Any] = {}
if email_belongs_to_ldap(user_profile.realm, user_profile.delivery_email):
raise JsonableError(_("Your Zulip password is managed in LDAP"))
try:
if not authenticate(
request,
username=user_profile.delivery_email,
password=old_password,
realm=user_profile.realm,
return_data=return_data,
):
raise JsonableError(_("Wrong password!"))
except RateLimited as e:
assert e.secs_to_freedom is not None
secs_to_freedom = int(e.secs_to_freedom)
raise JsonableError(
_("You're making too many attempts! Try again in {} seconds.").format(
secs_to_freedom
),
)
if not check_password_strength(new_password):
raise JsonableError(_("New password is too weak!"))
do_change_password(user_profile, new_password)
        # Password changes invalidate sessions, see
# https://docs.djangoproject.com/en/3.2/topics/auth/default/#session-invalidation-on-password-change
# for details. To avoid this logging the user out of their own
# session (which would provide a confusing UX at best), we
# update the session hash here.
update_session_auth_hash(request, user_profile)
# We also save the session to the DB immediately to mitigate
# race conditions. In theory, there is still a race condition
# and to completely avoid it we will have to use some kind of
# mutex lock in `django.contrib.auth.get_user` where session
# is verified. To make that lock work we will have to control
# the AuthenticationMiddleware which is currently controlled
        # by Django.
request.session.save()
result: Dict[str, Any] = {}
new_email = email.strip()
if user_profile.delivery_email != new_email and new_email != "":
if user_profile.realm.email_changes_disabled and not user_profile.is_realm_admin:
raise JsonableError(_("Email address changes are disabled in this organization."))
error = validate_email_is_valid(
new_email,
get_realm_email_validator(user_profile.realm),
)
if error:
raise JsonableError(error)
try:
validate_email_not_already_in_realm(
user_profile.realm,
new_email,
verbose=False,
)
except ValidationError as e:
raise JsonableError(e.message)
ratelimited, time_until_free = RateLimitedUser(
user_profile, domain="email_change_by_user"
).rate_limit()
if ratelimited:
raise RateLimited(time_until_free)
do_start_email_change_process(user_profile, new_email)
if user_profile.full_name != full_name and full_name.strip() != "":
if name_changes_disabled(user_profile.realm) and not user_profile.is_realm_admin:
            # Failing silently is fine -- they can't do it through the UI, so
# they'd have to be trying to break the rules.
pass
else:
# Note that check_change_full_name strips the passed name automatically
check_change_full_name(user_profile, full_name, user_profile)
# Loop over user_profile.property_types
request_settings = {k: v for k, v in list(locals().items()) if k in user_profile.property_types}
for k, v in list(request_settings.items()):
if v is not None and getattr(user_profile, k) != v:
do_change_user_setting(user_profile, k, v, acting_user=user_profile)
if timezone is not None and user_profile.timezone != timezone:
do_change_user_setting(user_profile, "timezone", timezone, acting_user=user_profile)
# TODO: Do this more generally.
from zerver.lib.request import RequestNotes
request_notes = RequestNotes.get_notes(request)
for req_var in request.POST:
if req_var not in request_notes.processed_parameters:
request_notes.ignored_parameters.add(req_var)
if len(request_notes.ignored_parameters) > 0:
result["ignored_parameters_unsupported"] = list(request_notes.ignored_parameters)
return json_success(result)
def set_avatar_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
raise JsonableError(_("You must upload exactly one avatar."))
if avatar_changes_disabled(user_profile.realm) and not user_profile.is_realm_admin:
raise JsonableError(str(AVATAR_CHANGES_DISABLED_ERROR))
user_file = list(request.FILES.values())[0]
if (settings.MAX_AVATAR_FILE_SIZE_MIB * 1024 * 1024) < user_file.size:
raise JsonableError(
_("Uploaded file is larger than the allowed limit of {} MiB").format(
settings.MAX_AVATAR_FILE_SIZE_MIB,
)
)
upload_avatar_image(user_file, user_profile, user_profile)
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_USER, acting_user=user_profile)
user_avatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url=user_avatar_url,
)
return json_success(json_result)
def delete_avatar_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if avatar_changes_disabled(user_profile.realm) and not user_profile.is_realm_admin:
raise JsonableError(str(AVATAR_CHANGES_DISABLED_ERROR))
do_change_avatar_fields(
user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=user_profile
)
gravatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url=gravatar_url,
)
return json_success(json_result)
# We don't use @human_users_only here, because there are use cases for
# a bot regenerating its own API key.
@has_request_variables
def regenerate_api_key(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
new_api_key = do_regenerate_api_key(user_profile, user_profile)
json_result = dict(
api_key=new_api_key,
)
return json_success(json_result)
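# Illustrative sketch only; nothing in this module uses it. It restates the
# bound that check_settings_values enforces for the email batching period:
# the value must be positive and at most one week (in seconds).
MAX_EMAIL_BATCHING_PERIOD_SECONDS = 7 * 24 * 60 * 60
def example_batching_period_is_valid(seconds: int) -> bool:
    """Return True if a batching period would pass check_settings_values."""
    return 0 < seconds <= MAX_EMAIL_BATCHING_PERIOD_SECONDS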
|
|
import gflags
import logging
from ct.client import entry_decoder
from ct.client import state
from ct.client import aggregated_reporter
from ct.crypto import error
from ct.crypto import merkle
from ct.client import db_reporter
from ct.client import text_reporter
from ct.proto import client_pb2
from twisted.internet import defer
from twisted.internet import threads
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("entry_write_batch_size", 1000, "Maximum number of "
"entries to batch into one database write")
class Monitor(object):
def __init__(self, client, verifier, hasher, db, cert_db, log_key,
state_keeper):
self.__client = client
self.__verifier = verifier
self.__hasher = hasher
self.__db = db
self.__state_keeper = state_keeper
# TODO(ekasper): once consistency checks are in place, also load/store
# Merkle tree info.
# Depends on: Merkle trees implemented in Python.
self.__state = client_pb2.MonitorState()
self.__report = aggregated_reporter.AggregatedCertificateReport(
(text_reporter.TextCertificateReport(),
db_reporter.CertDBCertificateReport(cert_db, log_key)))
try:
self.__state = self.__state_keeper.read(client_pb2.MonitorState)
except state.FileNotFoundError:
# TODO(ekasper): initialize state file with a setup script, so we
# can raise with certainty when it's not found.
logging.warning("Monitor state file not found, assuming first "
"run.")
else:
if not self.__state.HasField("verified_sth"):
logging.warning("No verified monitor state, assuming first run.")
# load compact merkle tree state from the monitor state
self._verified_tree = merkle.CompactMerkleTree(hasher)
self._unverified_tree = merkle.CompactMerkleTree(hasher)
self._verified_tree.load(self.__state.verified_tree)
self._unverified_tree.load(self.__state.unverified_tree)
def __repr__(self):
return "%r(%r, %r, %r, %r)" % (self.__class__.__name__, self.__client,
self.__verifier, self.__db,
self.__state_file)
def __str__(self):
return "%s(%s, %s, %s, %s)" % (self.__class__.__name__, self.__client,
self.__verifier, self.__db,
self.__state_file)
def __update_state(self, new_state):
"""Update state and write to disk."""
# save compact merkle tree state into the monitor state
self._verified_tree.save(new_state.verified_tree)
self._unverified_tree.save(new_state.unverified_tree)
self.__state_keeper.write(new_state)
self.__state = new_state
logging.info("New state is %s" % new_state)
@property
def servername(self):
return self.__client.servername
@property
def data_timestamp(self):
"""Timestamp of the latest verified data, in milliseconds since epoch.
"""
return self.__state.verified_sth.timestamp
def __fired_deferred(self, result):
""""Create a fired deferred to indicate that the asynchronous operation
should proceed immediately, when the result is already available."""
fire = defer.Deferred()
fire.callback(result)
return fire
def _set_pending_sth(self, new_sth):
"""Set pending_sth from new_sth, or just verified_sth if not bigger."""
logging.info("STH verified, updating state.")
if new_sth.tree_size < self.__state.verified_sth.tree_size:
raise ValueError("pending size must be >= verified size")
if new_sth.timestamp <= self.__state.verified_sth.timestamp:
raise ValueError("pending time must be > verified time")
new_state = client_pb2.MonitorState()
new_state.CopyFrom(self.__state)
if new_sth.tree_size > self.__state.verified_sth.tree_size:
new_state.pending_sth.CopyFrom(new_sth)
else:
new_state.verified_sth.CopyFrom(new_sth)
self.__update_state(new_state)
return True
def _set_verified_tree(self, new_tree):
"""Set verified_tree and maybe move pending_sth to verified_sth."""
self._verified_tree = new_tree
old_state = self.__state
new_state = client_pb2.MonitorState()
new_state.CopyFrom(self.__state)
assert old_state.pending_sth.tree_size >= new_tree.tree_size
if old_state.pending_sth.tree_size == new_tree.tree_size:
# all pending entries retrieved
# already did consistency checks so this should always be true
assert (old_state.pending_sth.sha256_root_hash ==
self._verified_tree.root_hash())
new_state.verified_sth.CopyFrom(old_state.pending_sth)
new_state.ClearField("pending_sth")
self.__update_state(new_state)
# we just set new verified tree, so we report all changes
self.__report.report()
def _update_unverified_data(self, unverified_tree):
self._unverified_tree = unverified_tree
new_state = client_pb2.MonitorState()
new_state.CopyFrom(self.__state)
self.__update_state(new_state)
def __get_audited_sth(self, sth, verify_status):
audited_sth = client_pb2.AuditedSth()
audited_sth.sth.CopyFrom(sth)
audited_sth.audit.status = verify_status
return audited_sth
def __verify_consistency_callback(self, proof, old_sth, new_sth):
self.__db.store_sth(self.servername,
self.__get_audited_sth(new_sth,
client_pb2.UNVERIFIED))
try:
logging.debug("got proof for (%s, %s): %s",
old_sth.tree_size, new_sth.tree_size,
map(lambda b: b[:8].encode("base64")[:-2] + "...", proof))
self.__verifier.verify_sth_consistency(old_sth, new_sth, proof)
except error.VerifyError as e:
# catches both ConsistencyError and ProofError. when alerts are
# implemented, only the former should trigger an immediate alert;
# the latter may have innocent causes (e.g. data corruption,
# software bug) so we could give it a chance to recover before
# alerting.
self.__db.store_sth(self.servername,
self.__get_audited_sth(new_sth,
client_pb2.VERIFY_ERROR))
logging.error("Could not verify STH consistency: %s vs %s!!!\n%s" %
(old_sth, new_sth, e))
raise
else:
self.__db.store_sth(self.servername,
self.__get_audited_sth(new_sth,
client_pb2.VERIFIED))
def _verify_consistency(self, old_sth, new_sth):
"""Verifies that old STH is consistent with new STH.
Returns: Deferred that fires with boolean indicating whether updating
succeeded.
Deferred raises: error.ConsistencyError if STHs were inconsistent"""
proof = self.__client.get_sth_consistency(old_sth.tree_size,
new_sth.tree_size)
proof.addCallback(self.__verify_consistency_callback, old_sth, new_sth)
return proof
def __update_sth_errback(self, failure):
"""Fired if there was network error or log server sent invalid
response"""
logging.error("get-sth from %s failed: %s" % (self.servername,
failure.getErrorMessage()))
return False
def __update_sth_verify_consistency_before_accepting_eb(self, failure):
"""Errback for verify_consistency method which is called before setting
sth as verified. If STH was invalid appropriate error message is
already logged, so we only want to return false as update_sth failed."""
failure.trap(error.VerifyError)
return False
def __handle_old_sth_errback(self, failure, sth_response):
failure.trap(error.VerifyError)
logging.error("Received older STH which is older and inconsistent "
"with current verified STH: %s vs %s. Error: %s" %
(sth_response, self.__state.verified_sth, failure))
def __handle_old_sth_callback(self, result, sth_response):
logging.warning("Rejecting received "
"STH: timestamp is older than current verified "
"STH: %s vs %s " %
(sth_response, self.__state.verified_sth))
def __update_sth_callback(self, sth_response):
# If we got the same response as last time, do nothing.
# If we got an older response than last time, make sure that it's
# consistent with current verified STH and then return False.
# (If older response is consistent then, there is nothing wrong
        # with the fact that we received an older timestamp - the log could be
        # out of sync - but we should not rewind to older data.)
#
# The client should always return an STH but best eliminate the
# None == None case explicitly by only shortcutting the verification
# if we already have a verified STH.
if self.__state.HasField("verified_sth"):
if sth_response == self.__state.verified_sth:
logging.info("Ignoring already-verified STH: %s" %
sth_response)
return True
elif (sth_response.timestamp <
self.__state.verified_sth.timestamp):
d = self._verify_consistency(sth_response,
self.__state.verified_sth)
d.addCallback(self.__handle_old_sth_callback, sth_response)
d.addErrback(self.__handle_old_sth_errback, sth_response)
return False
try:
self.__verifier.verify_sth(sth_response)
except (error.EncodingError, error.VerifyError):
logging.error("Invalid STH: %s" % sth_response)
return False
# Verify consistency to catch the log trying to trick us
# into rewinding the tree.
d = self._verify_consistency(self.__state.verified_sth, sth_response)
d.addCallback(lambda result: self._set_pending_sth(sth_response))
d.addErrback(self.__update_sth_verify_consistency_before_accepting_eb)
return d
def _update_sth(self):
"""Get a new candidate STH. If update succeeds, stores the new STH as
pending. Does nothing if there is already a pending
STH.
Returns: Deferred that fires with boolean indicating whether updating
succeeded."""
if self.__state.HasField("pending_sth"):
return self.__fired_deferred(True)
logging.info("Fetching new STH")
sth_response = self.__client.get_sth()
sth_response.addCallback(self.__update_sth_callback)
sth_response.addErrback(self.__update_sth_errback)
return sth_response
def _compute_projected_sth_from_tree(self, tree, extra_leaves):
partial_sth = client_pb2.SthResponse()
old_size = tree.tree_size
partial_sth.tree_size = old_size + len(extra_leaves)
# we only want to check the hash, so just use a dummy timestamp
# that looks valid so the temporal verifier doesn't complain
partial_sth.timestamp = 0
extra_raw_leaves = [leaf.leaf_input for leaf in extra_leaves]
new_tree = tree.extended(extra_raw_leaves)
partial_sth.sha256_root_hash = new_tree.root_hash()
return partial_sth, new_tree
def _compute_projected_sth(self, extra_leaves):
"""Compute a partial projected STH.
Useful for when an intermediate STH is not directly available from the
server, but you still want to do something with the root hash.
Args:
extra_leaves: Extra leaves present in the tree for the new STH, in
the same order as in that tree.
Returns:
(partial_sth, new_tree)
partial_sth: A partial STH with timestamp 0 and empty signature.
new_tree: New CompactMerkleTree with the extra_leaves integrated.
"""
return self._compute_projected_sth_from_tree(self._verified_tree,
extra_leaves)
@staticmethod
def __estimate_time(num_new_entries):
if num_new_entries < 1000:
return "a moment"
elif num_new_entries < 1000000:
return "a while"
else:
return "all night"
def _fetch_entries_eb(self, e, consumer):
logging.error("get-entries from %s failed: %s" %
(self.servername, e))
consumer.done(None)
return True
def _scan_entries(self, entries):
"""Passes entries to certificate report.
Args:
entries: array of (entry_index, entry_response) tuples.
"""
der_certs = []
for entry_index, entry in entries:
parsed_entry = entry_decoder.decode_entry(entry)
ts_entry = parsed_entry.merkle_leaf.timestamped_entry
if ts_entry.entry_type == client_pb2.X509_ENTRY:
der_cert = ts_entry.asn1_cert
der_chain = parsed_entry.extra_data.certificate_chain
else:
der_cert = (
parsed_entry.extra_data.precert_chain_entry.pre_certificate)
der_chain = (
parsed_entry.extra_data.
precert_chain_entry.precertificate_chain)
der_chain = der_chain[:]
der_certs.append((entry_index, der_cert, der_chain,
ts_entry.entry_type))
self.__report.scan_der_certs(der_certs)
class EntryConsumer(object):
"""Consumer for log_client.EntryProducer.
        When everything has been consumed, the consumed field (a Deferred) fires
        with a boolean indicating whether consumption succeeded.
"""
def __init__(self, producer, monitor, pending_sth, verified_tree):
self._producer = producer
self._monitor = monitor
self._pending_sth = pending_sth
self._query_size = self._producer._end - self._producer._start + 1
self._end = self._producer._end
self._start = self._producer._start
self._next_sequence_number = self._start
            # unverified_tree is the tree that will be built up during consumption
self._unverified_tree = verified_tree
self.consumed = defer.Deferred()
self._fetched = 0
def done(self, result):
if not result:
self.consumed.callback(False)
return False
self.result = result
if result < self._query_size:
logging.error("Failed to fetch all entries: expected tree size "
"%d vs retrieved tree size %d" %
(self._end + 1, self._next_sequence_number))
self.consumed.callback(False)
return False
# check that the batch is consistent with the eventual pending_sth
d = self._verify(self._partial_sth, self._unverified_tree, result)
d.addErrback(self._verify_errback)
self.consumed.callback(lambda x: d)
return True
def _verify_errback(self, failure):
failure.trap(error.VerifyError)
self._monitor._update_unverified_data(self._monitor._verified_tree)
return False
def _verify_log(self, result, new_tree, verified_entries):
logging.info("Verified %d entries" % verified_entries)
self._monitor._set_verified_tree(new_tree)
return True
def _verify(self, partial_sth, new_tree, entries_count):
d = self._monitor._verify_consistency(partial_sth,
self._pending_sth)
d.addCallback(self._verify_log, new_tree, entries_count)
return d
def consume(self, entry_batch):
self._fetched += len(entry_batch)
logging.info("Fetched %d entries (total: %d from %d)" %
(len(entry_batch), self._fetched, self._query_size))
scan = threads.deferToThread(
self._monitor._scan_entries,
enumerate(entry_batch, self._next_sequence_number))
# calculate the hash for the latest fetched certs
# TODO(ekasper): parse temporary data into permanent storage.
self._partial_sth, self._unverified_tree = \
self._monitor._compute_projected_sth_from_tree(
self._unverified_tree, entry_batch)
self._next_sequence_number += len(entry_batch)
self._monitor._update_unverified_data(self._unverified_tree)
return scan
def _fetch_entries(self, start, end):
"""Fetches entries from the log.
Returns: Deferred that fires with boolean indicating whether fetching
suceeded"""
num_new_entries = end - start + 1
logging.info("Fetching %d new entries: this will take %s..." %
(num_new_entries,
self.__estimate_time(num_new_entries)))
producer = self.__client.get_entries(start, end)
consumer = Monitor.EntryConsumer(producer, self,
self.__state.pending_sth,
self._unverified_tree)
d = producer.startProducing(consumer)
d.addCallback(consumer.done)
d.addErrback(self._fetch_entries_eb, consumer)
return consumer.consumed
def _update_entries(self):
"""Retrieve new entries according to the pending STH.
Returns: Deferred that fires with boolean indicating whether updating
succeeded.
"""
if not self.__state.HasField("pending_sth"):
return self.__fired_deferred(True)
# Default is 0, which is what we want.
wanted_entries = self.__state.pending_sth.tree_size
last_parsed_size = self._unverified_tree.tree_size
if wanted_entries > last_parsed_size:
return self._fetch_entries(last_parsed_size, wanted_entries-1)
else:
return self.__fired_deferred(True)
def _update_result(self, updates_result):
if not updates_result:
logging.error("Update failed")
return updates_result
def update(self):
"""Update log view. Returns True if the update succeeded, False if any
error occurred."""
logging.info("Starting update for %s" % self.servername)
d = self._update_sth()
d.addCallback(lambda sth_result: self._update_entries() if sth_result
else False)
d.addCallback(self._update_result)
return d
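
# Illustrative sketch (an assumption, not part of the monitor above): with Twisted
# installed, the Deferred chain built by Monitor.update() can be mimicked with
# stand-in callables, showing how the STH update gates the entry update and how the
# final callback reports overall success.
if __name__ == "__main__":
    from twisted.internet import defer

    def _sth_update_demo():
        # stand-in for _update_sth(): pretend a new STH was fetched and verified
        return defer.succeed(True)

    def _entry_update_demo(sth_ok):
        # stand-in for _update_entries(): only meaningful if the STH update succeeded
        return defer.succeed(True) if sth_ok else defer.succeed(False)

    def _report(result):
        print("update succeeded" if result else "update failed")
        return result

    d = _sth_update_demo()
    d.addCallback(_entry_update_demo)
    d.addCallback(_report)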
|
|
"""Helper classes for formatting data as tables"""
from collections import OrderedDict
from collections.abc import Mapping
from django.forms import MediaDefiningClass
from django.template.loader import get_template
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.text import capfirst
from wagtail.admin.ui.components import Component
from wagtail.core.utils import multigetattr
class Column(metaclass=MediaDefiningClass):
class Header:
# Helper object used for rendering column headers in templates -
# behaves as a component (i.e. it has a render_html method) but delegates rendering
# to Column.render_header_html
def __init__(self, column):
self.column = column
def render_html(self, parent_context):
return self.column.render_header_html(parent_context)
class Cell:
# Helper object used for rendering table cells in templates -
# behaves as a component (i.e. it has a render_html method) but delegates rendering
# to Column.render_cell_html
def __init__(self, column, instance):
self.column = column
self.instance = instance
def render_html(self, parent_context):
return self.column.render_cell_html(self.instance, parent_context)
header_template_name = "wagtailadmin/tables/column_header.html"
cell_template_name = "wagtailadmin/tables/cell.html"
def __init__(
self, name, label=None, accessor=None, classname=None, sort_key=None, width=None
):
self.name = name
self.accessor = accessor or name
self.label = label or capfirst(name.replace("_", " "))
self.classname = classname
self.sort_key = sort_key
self.header = Column.Header(self)
self.width = width
def get_header_context_data(self, parent_context):
"""
Compiles the context dictionary to pass to the header template when rendering the column header
"""
table = parent_context["table"]
return {
"column": self,
"table": table,
"is_orderable": bool(self.sort_key),
"is_ascending": self.sort_key and table.ordering == self.sort_key,
"is_descending": self.sort_key and table.ordering == ("-" + self.sort_key),
"request": parent_context.get("request"),
}
@cached_property
def header_template(self):
return get_template(self.header_template_name)
@cached_property
def cell_template(self):
return get_template(self.cell_template_name)
def render_header_html(self, parent_context):
"""
Renders the column's header
"""
context = self.get_header_context_data(parent_context)
return self.header_template.render(context)
def get_value(self, instance):
"""
Given an instance (i.e. any object containing data), extract the field of data to be
displayed in a cell of this column
"""
if callable(self.accessor):
return self.accessor(instance)
else:
return multigetattr(instance, self.accessor)
def get_cell_context_data(self, instance, parent_context):
"""
Compiles the context dictionary to pass to the cell template when rendering a table cell for
the given instance
"""
return {
"instance": instance,
"column": self,
"value": self.get_value(instance),
"request": parent_context.get("request"),
}
def render_cell_html(self, instance, parent_context):
"""
Renders a table cell containing data for the given instance
"""
context = self.get_cell_context_data(instance, parent_context)
return self.cell_template.render(context)
def get_cell(self, instance):
"""
Return an object encapsulating this column and an item of data, which we can use for
rendering a table cell into a template
"""
return Column.Cell(self, instance)
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__module__,
self.__class__.__qualname__,
self.name,
)
class TitleColumn(Column):
"""A column where data is styled as a title and wrapped in a link"""
cell_template_name = "wagtailadmin/tables/title_cell.html"
def __init__(
self, name, url_name=None, get_url=None, link_classname=None, **kwargs
):
super().__init__(name, **kwargs)
self.url_name = url_name
self._get_url_func = get_url
self.link_classname = link_classname
def get_cell_context_data(self, instance, parent_context):
context = super().get_cell_context_data(instance, parent_context)
context["link_url"] = self.get_link_url(instance, parent_context)
context["link_classname"] = self.link_classname
return context
def get_link_url(self, instance, parent_context):
if self._get_url_func:
return self._get_url_func(instance)
else:
return reverse(self.url_name, args=(instance.pk,))
class StatusFlagColumn(Column):
"""Represents a boolean value as a status tag (or lack thereof, if the corresponding label is None)"""
cell_template_name = "wagtailadmin/tables/status_flag_cell.html"
def __init__(self, name, true_label=None, false_label=None, **kwargs):
super().__init__(name, **kwargs)
self.true_label = true_label
self.false_label = false_label
class DateColumn(Column):
"""Outputs a date in human-readable format"""
cell_template_name = "wagtailadmin/tables/date_cell.html"
class UserColumn(Column):
"""Outputs the username and avatar for a user"""
cell_template_name = "wagtailadmin/tables/user_cell.html"
def __init__(self, name, blank_display_name="", **kwargs):
super().__init__(name, **kwargs)
self.blank_display_name = blank_display_name
def get_cell_context_data(self, instance, parent_context):
context = super().get_cell_context_data(instance, parent_context)
user = context["value"]
if user:
try:
full_name = user.get_full_name().strip()
except AttributeError:
full_name = ""
context["display_name"] = full_name or user.get_username()
else:
context["display_name"] = self.blank_display_name
return context
class Table(Component):
template_name = "wagtailadmin/tables/table.html"
classname = "listing"
header_row_classname = ""
def __init__(self, columns, data, template_name=None, base_url=None, ordering=None):
self.columns = OrderedDict([(column.name, column) for column in columns])
self.data = data
if template_name:
self.template_name = template_name
self.base_url = base_url
self.ordering = ordering
def get_context_data(self, parent_context):
context = super().get_context_data(parent_context)
context["table"] = self
context["request"] = parent_context.get("request")
return context
@property
def media(self):
media = super().media
for col in self.columns.values():
media += col.media
return media
@property
def rows(self):
for instance in self.data:
yield Table.Row(self.columns, instance)
def has_column_widths(self):
return any(column.width for column in self.columns.values())
class Row(Mapping):
# behaves as an OrderedDict whose items are the rendered results of
        # the corresponding column's get_cell method applied to the instance
def __init__(self, columns, instance):
self.columns = columns
self.instance = instance
def __len__(self):
return len(self.columns)
def __getitem__(self, key):
return self.columns[key].get_cell(self.instance)
def __iter__(self):
for name in self.columns:
yield name
def __repr__(self):
return repr([col.get_cell(self.instance) for col in self.columns.values()])
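
# Illustrative usage sketch (an assumption, not taken from the Wagtail docs; it needs a
# configured Django/Wagtail project, and the url_name, queryset and field names below
# are placeholders): a listing view would typically build a Table from Column instances
# and hand it to a template that renders it through the Component interface.
#
#   columns = [
#       TitleColumn("title", url_name="wagtailadmin_pages:edit", sort_key="title"),
#       UserColumn("owner"),
#       DateColumn("first_published_at", label="Published"),
#   ]
#   table = Table(columns, data=Page.objects.all(), ordering="title")
#   # template side: {% component table %}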
|
|
import numpy as np
try:
from scipy.sparse.linalg import spsolve
from scipy.sparse import coo_matrix, eye
except ImportError:
pass
from . import triangles
from .util import unitize
from .geometry import index_sparse
from .triangles import mass_properties
def filter_laplacian(mesh,
lamb=0.5,
iterations=10,
implicit_time_integration=False,
volume_constraint=True,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing.
Articles
1 - "Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
2 - "Implicit Fairing of Irregular Meshes using Diffusion
and Curvature Flow". M. Desbrun, M. Meyer,
P. Schroder, A.H.B. Caltech
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
lamb : float
Diffusion speed constant
If 0.0, no diffusion
If > 0.0, diffusion occurs
implicit_time_integration: boolean
if False: explicit time integration
-lamb <= 1.0 - Stability Limit (Article 1)
if True: implicit time integration
-lamb no limit (Article 2)
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# save initial volume
if volume_constraint:
vol_ini = mesh.volume
# get mesh vertices and faces as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
faces = mesh.faces.copy().view(np.ndarray)
# Set matrix for linear system of equations
if implicit_time_integration:
dlap = laplacian_operator.shape[0]
AA = eye(dlap) + lamb * (eye(dlap) - laplacian_operator)
# Number of passes
for _index in range(iterations):
# Classic Explicit Time Integration - Article 1
if not implicit_time_integration:
dot = laplacian_operator.dot(vertices) - vertices
vertices += lamb * dot
# Implicit Time Integration - Article 2
else:
vertices = spsolve(AA, vertices)
# volume constraint
if volume_constraint:
# find the volume with new vertex positions
vol_new = triangles.mass_properties(
vertices[faces], skip_inertia=True)["volume"]
# scale by volume ratio
vertices *= ((vol_ini / vol_new) ** (1.0 / 3.0))
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
def filter_humphrey(mesh,
alpha=0.1,
beta=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and Humphrey filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
alpha : float
Controls shrinkage, range is 0.0 - 1.0
If 0.0, not considered
If 1.0, no smoothing
beta : float
Controls how aggressive smoothing is
If 0.0, no smoothing
If 1.0, full aggressiveness
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# save original unmodified vertices
original = vertices.copy()
# run through iterations of filter
for _index in range(iterations):
vert_q = vertices.copy()
vertices = laplacian_operator.dot(vertices)
vert_b = vertices - (alpha * original + (1.0 - alpha) * vert_q)
vertices -= (beta * vert_b + (1.0 - beta) *
laplacian_operator.dot(vert_b))
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
def filter_taubin(mesh,
lamb=0.5,
nu=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and taubin filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place.
lamb : float
Controls shrinkage, range is 0.0 - 1.0
nu : float
Controls dilation, range is 0.0 - 1.0
      Nu must satisfy 0.0 < 1.0/lamb - 1.0/nu < 0.1
      (a worked example follows this function)
iterations : int
Number of passes to run the filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# run through multiple passes of the filter
for index in range(iterations):
# do a sparse dot product on the vertices
dot = laplacian_operator.dot(vertices) - vertices
# alternate shrinkage and dilation
if index % 2 == 0:
vertices += lamb * dot
else:
vertices -= nu * dot
# assign updated vertices back to mesh
mesh.vertices = vertices
return mesh
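
# Worked example for the Taubin passband condition noted in the docstring above (an
# illustration, not a prescribed setting): with lamb=0.5, choosing nu=0.52 gives
# 1/lamb - 1/nu = 2.0 - 1.923 ~= 0.077, which satisfies 0.0 < 1/lamb - 1/nu < 0.1,
# whereas nu=0.5 gives exactly 0.0 and falls outside the recommended band.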
def filter_mut_dif_laplacian(mesh,
lamb=0.5,
iterations=10,
volume_constraint=True,
laplacian_operator=None):
"""
    Smooth a mesh in-place using Laplacian smoothing with a
    mutable diffusion Laplacian.
Articles
Barroqueiro, B., Andrade-Campos, A., Dias-de-Oliveira,
J., and Valente, R. (January 21, 2021).
"Bridging between topology optimization and additive
manufacturing via Laplacian smoothing." ASME. J. Mech. Des.
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
lamb : float
Diffusion speed constant
If 0.0, no diffusion
If > 0.0, diffusion occurs
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# Set volume constraint
if volume_constraint:
v_ini = mesh.volume
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
faces = mesh.faces.copy().view(np.ndarray)
eps = 0.01 * (np.max(mesh.area_faces)**0.5)
# Number of passes
for _index in range(iterations):
        # Mutable diffusion
normals = get_vertices_normals(mesh)
qi = laplacian_operator.dot(vertices)
pi_qi = vertices - qi
adil = np.abs((normals * pi_qi).dot(np.ones((3, 1))))
adil = 1.0 / np.maximum(1e-12, adil)
lamber = np.maximum(
0.2 * lamb, np.minimum(1.0, lamb * adil / np.mean(adil)))
# Filter
dot = laplacian_operator.dot(vertices) - vertices
vertices += lamber * dot
# Volume constraint
if volume_constraint:
vol = mass_properties(vertices[faces], skip_inertia=True)["volume"]
if _index == 0:
slope = dilate_slope(vertices, faces, normals, vol, eps)
vertices += normals * slope * (v_ini - vol)
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
def laplacian_calculation(mesh, equal_weight=True, pinned_vertices=[]):
"""
Calculate a sparse matrix for laplacian operations.
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
equal_weight : bool
If True, all neighbors will be considered equally
If False, all neighbors will be weighted by inverse distance
Returns
----------
laplacian : scipy.sparse.coo.coo_matrix
Laplacian operator
"""
# get the vertex neighbors from the cache
neighbors = mesh.vertex_neighbors
    # if a node is pinned, it only averages its own coordinates,
    # so in practice it will not move
for i in pinned_vertices:
neighbors[i] = [i]
# avoid hitting crc checks in loops
vertices = mesh.vertices.view(np.ndarray)
# stack neighbors to 1D arrays
col = np.concatenate(neighbors)
row = np.concatenate([[i] * len(n)
for i, n in enumerate(neighbors)])
if equal_weight:
# equal weights for each neighbor
data = np.concatenate([[1.0 / len(n)] * len(n)
for n in neighbors])
else:
# umbrella weights, distance-weighted
# use dot product of ones to replace array.sum(axis=1)
ones = np.ones(3)
        # the inverse of the distance from each vertex to its neighbors
norms = [
1.0 / np.maximum(1e-6, np.sqrt(np.dot(
(vertices[i] - vertices[n]) ** 2, ones)))
for i, n in enumerate(neighbors)]
# normalize group and stack into single array
data = np.concatenate([i / i.sum() for i in norms])
# create the sparse matrix
matrix = coo_matrix((data, (row, col)),
shape=[len(vertices)] * 2)
return matrix
def get_vertices_normals(mesh):
"""
    Compute vertex normals using equal weighting of neighboring faces.
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
Returns
----------
vertices_normals: array
Vertices normals
"""
# get mesh vertices and faces
vertices = mesh.vertices
faces = mesh.faces
# get face normals
face_normals = mesh.face_normals
# Compute Vert normals
vert_normals = index_sparse(len(vertices), faces).dot(face_normals)
return unitize(vert_normals)
def dilate_slope(vertices, faces, normals, v, eps):
"""
    Get the derivative of the dilation scalar with respect to the volume
    variation, estimated by finite differences.
    Thus, vertices += vertex_normals * dilate_slope * (initial_volume - shrunk_volume)
    Parameters
    -------------
    vertices : mesh.vertices
    faces : mesh.faces
    normals : array
      Vertex normals
    v : float
      Current volume
    eps : float
      Finite difference step
    Returns
    ----------
    dilate_slope : float
      Derivative
"""
    # finite difference derivative
vertices2 = vertices + normals * eps
v2 = mass_properties(vertices2[faces], skip_inertia=True)["volume"]
return (eps) / (v2 - v)
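
# Illustrative, self-contained sketch (an assumption, not part of trimesh's API): it
# reproduces the explicit smoothing step used above, vertices += lamb * (L.dot(v) - v),
# on a hand-built equal-weight Laplacian for four vertices of a unit square.
if __name__ == "__main__":
    import numpy as np
    from scipy.sparse import coo_matrix

    verts = np.array([[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [1.0, 1.0, 0.0],
                      [0.0, 1.0, 0.0]])
    # each vertex averages its two edge neighbours on the square
    neighbors = [[1, 3], [0, 2], [1, 3], [0, 2]]
    row = np.concatenate([[i] * len(n) for i, n in enumerate(neighbors)])
    col = np.concatenate(neighbors)
    data = np.concatenate([[1.0 / len(n)] * len(n) for n in neighbors])
    lap = coo_matrix((data, (row, col)), shape=(4, 4))

    lamb = 0.5
    for _ in range(10):
        verts = verts + lamb * (lap.dot(verts) - verts)
    # the vertices contract toward the centre of the square
    print(verts)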
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._domains_operations import build_check_availability_request, build_create_or_update_ownership_identifier_request, build_create_or_update_request_initial, build_delete_ownership_identifier_request, build_delete_request, build_get_control_center_sso_request_request, build_get_ownership_identifier_request, build_get_request, build_list_by_resource_group_request, build_list_ownership_identifiers_request, build_list_recommendations_request, build_list_request, build_renew_request, build_update_ownership_identifier_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
"""DomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_availability(
self,
identifier: "_models.NameIdentifier",
**kwargs: Any
) -> "_models.DomainAvailabilityCheckResult":
"""Check if a domain is available for registration.
Description for Check if a domain is available for registration.
:param identifier: Name of the domain.
:type identifier: ~azure.mgmt.web.v2019_08_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainAvailabilityCheckResult, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.DomainAvailabilityCheckResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainAvailabilityCheckResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(identifier, 'NameIdentifier')
request = build_check_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainAvailabilityCheckResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'} # type: ignore
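    # Illustrative usage sketch (an assumption, not generated code): with azure-identity
    # and the top-level async management client from azure-mgmt-web, this operation is
    # typically reached as client.domains.check_availability(...). The subscription id
    # and "example.com" are placeholders.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.web.aio import WebSiteManagementClient
    #
    #   async with WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #       result = await client.domains.check_availability(
    #           _models.NameIdentifier(name="example.com"))
    #       print(result.available)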
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DomainCollection"]:
"""Get all domains in a subscription.
Description for Get all domains in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2019_08_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace_async
async def get_control_center_sso_request(
self,
**kwargs: Any
) -> "_models.DomainControlCenterSsoRequest":
"""Generate a single sign-on request for the domain management portal.
Description for Generate a single sign-on request for the domain management portal.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainControlCenterSsoRequest, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.DomainControlCenterSsoRequest
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainControlCenterSsoRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_control_center_sso_request_request(
subscription_id=self._config.subscription_id,
template_url=self.get_control_center_sso_request.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'} # type: ignore
@distributed_trace
def list_recommendations(
self,
parameters: "_models.DomainRecommendationSearchParameters",
**kwargs: Any
) -> AsyncIterable["_models.NameIdentifierCollection"]:
"""Get domain name recommendations based on keywords.
Description for Get domain name recommendations based on keywords.
:param parameters: Search parameters for domain name recommendations.
:type parameters: ~azure.mgmt.web.v2019_08_01.models.DomainRecommendationSearchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NameIdentifierCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2019_08_01.models.NameIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NameIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_recommendations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("NameIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DomainCollection"]:
"""Get all domains in a resource group.
Description for Get all domains in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2019_08_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.Domain":
"""Get a domain.
Description for Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> "_models.Domain":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'Domain')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> AsyncLROPoller["_models.Domain"]:
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2019_08_01.models.Domain
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Domain or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2019_08_01.models.Domain]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain=domain,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
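    # Illustrative usage sketch (an assumption; the resource group, domain name and
    # payload below are placeholders, and "client" is the management client from the
    # earlier sketch): begin_create_or_update returns an AsyncLROPoller, so a caller
    # awaits the poller and then its result() to get the final Domain model.
    #
    #   poller = await client.domains.begin_create_or_update(
    #       "my-resource-group", "contoso.com", domain_payload)
    #   domain = await poller.result()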
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
domain_name: str,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Delete a domain.
Description for Delete a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param force_hard_delete_domain: Specify :code:`<code>true</code>` to delete the domain
immediately. The default is :code:`<code>false</code>` which deletes the domain after 24 hours.
:type force_hard_delete_domain: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
force_hard_delete_domain=force_hard_delete_domain,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.DomainPatchResource",
**kwargs: Any
) -> "_models.Domain":
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2019_08_01.models.DomainPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'DomainPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def list_ownership_identifiers(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DomainOwnershipIdentifierCollection"]:
"""Lists domain ownership identifiers.
Description for Lists domain ownership identifiers.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.list_ownership_identifiers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainOwnershipIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'} # type: ignore
@distributed_trace_async
async def get_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Get ownership identifier for domain.
Description for Get ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def create_or_update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_create_or_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def delete_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete ownership identifier for domain.
Description for Delete ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> None:
"""Renew a domain.
Description for Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_renew_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
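# A minimal, hypothetical usage sketch (not part of the generated code): the
# async operations above are normally invoked on an instance of this
# operations class obtained from the management client, e.g.:
#
#   identifier = await domains_operations.create_or_update_ownership_identifier(
#       resource_group_name="my-rg",
#       domain_name="example.com",
#       name="my-identifier",
#       domain_ownership_identifier=_models.DomainOwnershipIdentifier(),
#   )
#   await domains_operations.renew(resource_group_name="my-rg",
#                                  domain_name="example.com")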
|
|
import datetime
import io
import os
from functools import wraps
from itertools import count
from shutil import copyfileobj
from threading import Lock
from zipfile import ZipFile
import dateutil.parser
import imageio
import requests
from pixivpy3 import PixivAPI, AppPixivAPI, PixivError
from .constants import (
IllustType, RankingMode,
SearchMode, SearchPeriod, SearchOrder
)
from .exceptions import (
LoginFailed, NoAuth, IllustError, APIError, DownloadError
)
from .illust import PixivIllust
from .utils import Json, download
from .utils.query_set import query_set
TOKEN_LIFETIME = datetime.timedelta(seconds=1800)  # Actual lifetime is 3600 s; refresh early.
ILLUST_DOWNLOAD_HEADERS = {
'Referer': 'https://app-api.pixiv.net/',
}
def _need_auth(func):
@wraps(func)
def new_func(self, *args, **kwargs):
self.check_auth(auto_re_login=self.auto_re_login)
return func(self, *args, **kwargs)
return new_func
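# The _need_auth decorator above is applied to the API methods below so that an
# expired token is checked (and, when auto_re_login is set, transparently
# refreshed) before the wrapped call runs.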
class PixivPixie:
"""Pixiv API interface.
Remember to call login() before using other methods.
Attributes:
auto_re_login: If true, PixivPixie will auto re-login when login token
expired.
"""
def __init__(self, auto_re_login=True, **requests_kwargs):
self.auto_re_login = auto_re_login
self._requests_kwargs = requests_kwargs
self._papi = PixivAPI(**requests_kwargs)
self._aapi = AppPixivAPI(**requests_kwargs)
self._has_auth = False
self._last_login = None
self._check_auth_lock = Lock()
self._username = None
self._password = None
@property
def requests_kwargs(self):
"""Parameters that will be passed to requests."""
return self._requests_kwargs
@requests_kwargs.setter
def requests_kwargs(self, requests_kwargs):
self._requests_kwargs = requests_kwargs
self._papi.requests_kwargs = requests_kwargs
self._aapi.requests_kwargs = requests_kwargs
@property
def has_auth(self):
"""Whether the pixie has login."""
return self._has_auth
@property
def last_login(self):
"""Last login time. Will be a datetime object or None if haven't login
yet."""
return self._last_login
def login(self, username, password):
"""Login Pixiv account.
Notice: The access token will expire after about 1 hour. So if you are
dealing with a long time quest, remember to re-login every some
time.
Args:
username: Your Pixiv account's username.
password: Your Pixiv account's password.
Returns:
None.
Raises:
LoginFailed: Raised if the username and password do not
match.
"""
if not username or not password:
raise LoginFailed
try:
self._papi.login(username, password)
# self._aapi.login(username, password)
self._aapi.access_token = self._papi.access_token
self._aapi.user_id = self._papi.user_id
self._aapi.refresh_token = self._papi.refresh_token
except PixivError:
raise LoginFailed
else:
self._has_auth = True
self._username = username
self._password = password
self._last_login = datetime.datetime.now()
return self
def check_auth(self, auto_re_login=False):
"""Raise error if the pixie doesn't has auth.
Args:
auto_re_login: If true, the PixivPixie will try to re-login when
login token expired.
Raises:
NoAuth: If the PixivPixie hasn't login first.
LoginFailed: If re-login failed.
"""
with self._check_auth_lock:
if not self.has_auth:
raise NoAuth
if datetime.datetime.now() - self.last_login >= TOKEN_LIFETIME:
# Token expired
if auto_re_login:
self.login(self._username, self._password)
else:
raise NoAuth
@_need_auth
def illust(self, illust_id):
"""Gets a single illust.
Args:
illust_id: An integer.
Returns:
A PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
IllustError: If the illust_id is invalid or the illust is blocked by
the Pixiv account setting.
"""
json_result = Json(self._papi.works(illust_id))
if json_result.status != 'success':
error_code = json_result.errors.system.get('code')
error_message = {
206: 'Target illust not found.',
229: 'Illust browsing restricted.',
}
raise IllustError(illust_id, error_message.get(error_code))
return PixivIllust.from_papi(self, json_result.response[0])
@classmethod
def _papi_call(
cls, call_func,
page=1, per_page=30,
iter_target=None, extra_yield=None,
**kwargs
):
current_page = page
while current_page:
json_result = Json(call_func(
page=current_page, per_page=per_page, **kwargs
))
if json_result.status != 'success':
raise APIError(call_func, json_result.errors)
if iter_target is None:
target = json_result.response
else:
target = iter_target(json_result.response)
for item in target:
if extra_yield is None:
yield item
else:
yield item, extra_yield(json_result.response)
current_page = json_result.pagination.next
def _aapi_call(self, call_func, **kwargs):
req_auth = True
while True:
try:
if int(kwargs['offset']) >= 5000:
break
except (KeyError, ValueError):
pass
json_result = Json(call_func(**kwargs, req_auth=req_auth))
if 'error' in json_result:
raise APIError(call_func, json_result.error)
yield from json_result.illusts
if json_result.next_url is None:
break
kwargs = self._aapi.parse_qs(json_result.next_url)
@query_set
@_need_auth
def my_following_illusts(self, until=None):
"""Fetch new illusts of following users.
Fetch new illusts of following users.
A normal user can only fetch the first 2000 illusts, while a Premium user
can fetch the first 5000.
If you didn't turn off the browsing restriction in account setting, the
R-18(G) illusts will be excluded.
Args:
until: Could be:
[default] None: No limit.
A string or datetime object which corresponds to the earliest
creation time of illusts.
Returns:
A QuerySet that yield PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
"""
if isinstance(until, str):
until = dateutil.parser.parse(until)
for json_result in self._papi_call(self._papi.me_following_works):
illust = PixivIllust.from_papi(self, json_result)
if until is not None and illust.creation_time < until:
return
yield illust
@query_set
@_need_auth
def user_illusts(self, user_id):
"""Fetch a user's illusts.
Fetch a user's illusts.
If you didn't turn off the browsing restriction in account setting, the
R-18(G) illusts will be excluded.
Args:
user_id: An integer.
Returns:
A QuerySet that yield PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
PAPIError: If the user_id is invalid.
"""
for json_result in self._papi_call(
self._papi.users_works, author_id=user_id,
):
yield PixivIllust.from_papi(self, json_result)
@query_set
@_need_auth
def ranking(
self, mode=RankingMode.DAY, date=None,
):
"""Fetch all ranking illusts.
Fetch all ranking illusts and returns them from rank high to low.
If you didn't turn off the browsing restriction in account setting, the
R-18(G) illusts will be excluded.
Args:
mode: Could be:
[default] DAY
WEEK
MONTH
DAY_MALE
DAY_FEMALE
WEEK_ORIGINAL
WEEK_ROOKIE
DAY_MANGA
DAY_R18
DAY_MALE_R18
DAY_FEMALE_R18
WEEK_R18
WEEK_R18G
These constants are defined in
pixiv_pixie.constants.RankingMode.
date: Could be:
[default] None: Will fetch the latest ranking.
A date or datetime object.
A string in the format of '%Y-%m-%d', e.g., '2017-08-01'.
Returns:
A QuerySet that yield PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
"""
if isinstance(date, (datetime.date, datetime.datetime)):
date = date.strftime('%Y-%m-%d')
# The response of PAPI does not contain metadata, so AAPI is used.
for rank, json_result in enumerate(
self._aapi_call(
self._aapi.illust_ranking, mode=mode.value, date=date,
),
start=1
):
illust = PixivIllust.from_aapi(self, json_result)
illust.rank = rank
yield illust
@query_set
@_need_auth
def search(
self, query,
mode=SearchMode.TAG,
period=SearchPeriod.ALL,
order=SearchOrder.DESC,
):
"""Search illusts.
Search illusts.
Args:
query: Query keyword. You can separate multiple keywords by space.
mode: Could be:
TEXT: Search in title and caption.
[default] TAG: Search in tags.
EXACT_TAG: Search in tags. Only exactly matched tag is
acceptable.
CAPTION: Search in caption.
These constants are defined in pixiv_pixie.constants.SearchMode.
period: Could be:
[default] ALL
DAY
WEEK
MONTH
This parameter is only applied when order is ASC.
These constants are defined in
pixiv_pixie.constants.SearchPeriod.
order: Could be:
[default] DESC: The output will be from new to old.
ASC: The output will be from old to new.
These constants are defined in
pixiv_pixie.constants.SearchOrder.
Returns:
A QuerySet that yield PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
"""
for json_result in self._papi_call(
self._papi.search_works, query=query,
mode=mode.value, period=period.value, order=order.value,
):
yield PixivIllust.from_papi(self, json_result)
@query_set
@_need_auth
def related_illusts(self, illust_id, limit=None):
"""Fetch all related illusts.
Fetch all related illusts of a provided illust.
Args:
illust_id: An integer.
limit: Max number of illusts to be yielded. If limit=None, there will
be no limit.
Returns:
A QuerySet that yield PixivIllust object.
Raises:
Any exceptions check_auth() will raise.
"""
for cnt, json_result in enumerate(self._aapi_call(
self._aapi.illust_related, illust_id=illust_id,
), start=1):
if limit is not None and cnt > limit:
break
yield PixivIllust.from_aapi(self, json_result)
@classmethod
def convert_zip_to_gif(
cls,
input_file, frame_delays=None, output_file=None,
use_pil=False,
):
"""Convert a zip file that contains all frames into gif.
Convert a zip file that contains all frames into gif.
Args:
input_file: The input file. May be str or a file-like object.
frame_delays: A list of frame delay durations in milliseconds.
output_file: The output file. May be str or a file-like object.
use_pil: Whether to use the Pillow library to create the GIF file. By
default the FreeImage library will be used. FreeImage creates better
quality and smaller files, but requires an external .dll/.so and
may crash for unknown reasons.
"""
if frame_delays is None:
if isinstance(input_file, str):
frame_info = os.path.splitext(input_file)[0] + '.txt'
with open(frame_info, 'rt', encoding='utf-8') as f:
frame_delays = [int(line) for line in f if line.strip()]
else:
raise ValueError('Could not get frame delays.')
if output_file is None:
if isinstance(input_file, str):
output_file = os.path.splitext(input_file)[0] + '.gif'
else:
raise ValueError('Could not determine output filename.')
dir_name = os.path.dirname(output_file)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
images = []
with ZipFile(input_file) as zip_file:
for name in sorted(zip_file.namelist()):
with zip_file.open(name) as input_file:
images.append(imageio.imread(io.BytesIO(input_file.read())))
frame_delays = [delay / 1000 for delay in frame_delays]
if not use_pil:
save_format = 'GIF-FI'
else:
save_format = 'GIF-PIL'
imageio.mimwrite(
output_file, images,
format=save_format, duration=frame_delays,
)
del images
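    # Example (a rough sketch): convert a downloaded ugoira zip whose frame
    # delays live in a sibling .txt file; the output name defaults to the same
    # basename with a .gif extension:
    #
    #   PixivPixie.convert_zip_to_gif('12345_ugoira.zip')  # -> 12345_ugoira.gif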
@classmethod
def _get_file_path(
cls, illust, page, url,
convert_ugoira,
directory, name,
addition_naming_info,
):
original_name = os.path.basename(url)
root, ext = os.path.splitext(original_name)
if convert_ugoira and ext == '.zip':
ext = '.gif'
original_name = root + ext
if name:
naming_info = {
'illust': illust,
'page': page,
'original_name': original_name,
'root': root,
'ext': ext,
}
if addition_naming_info:
naming_info.update(addition_naming_info)
filename = name.format(**naming_info)
else:
filename = original_name
file_path = os.path.join(directory, filename)
return file_path
@classmethod
def _try_remove_file(cls, path):
if not isinstance(path, str) or not path:
return
try:
os.remove(path)
except OSError:
pass
@classmethod
def _check_exist(cls, path, checklist):
basename = os.path.basename(path)
for folder in checklist:
if os.path.exists(os.path.join(folder, basename)):
return True
return False
def _download_illust_to_file(self, url, file):
requests_kwargs = self.requests_kwargs.copy()
requests_kwargs['stream'] = True
requests_kwargs['headers'] = ILLUST_DOWNLOAD_HEADERS
try:
wrote_size = 0
total_size = None
for wrote_size, total_size in download(
file, url, **requests_kwargs,
):
pass
if total_size is not None and wrote_size < total_size:
raise APIError(
self.download,
'Unexpected connection interruption.',
)
except requests.HTTPError as e:
raise APIError(self.download, e.response.text) from e
def _download_one_url(
self, illust, url, path,
convert_ugoira,
replace,
check_exists,
max_tries,
fake_download,
use_pil,
):
if not replace and os.path.exists(path):
return False
if self._check_exist(path, check_exists):
return False
if fake_download:
return False
dir_name = os.path.dirname(path)
frame_path = None
for tries in count(start=1):
try:
buffer = io.BytesIO()
self._download_illust_to_file(url, buffer)
buffer.seek(0)
if illust.type == IllustType.UGOIRA and convert_ugoira:
self.convert_zip_to_gif(
buffer, illust.frame_delays, path,
use_pil,
)
else:
if dir_name:
os.makedirs(dir_name, exist_ok=True)
with open(path, 'wb') as f:
copyfileobj(buffer, f)
if illust.type == IllustType.UGOIRA:
frame_path = os.path.splitext(path)[0] + '.txt'
with open(frame_path, 'wt') as f:
for frame_delay in illust.frame_delays:
print(frame_delay, file=f)
return True
except Exception as e:
self._try_remove_file(path)
self._try_remove_file(frame_path)
if max_tries is None or tries < max_tries:
continue
raise DownloadError(illust, e) from e
def _download_multiple_urls(
self, illust, target,
convert_ugoira,
replace,
check_exists,
max_tries,
fake_download,
use_pil,
):
result = []
for url, path in target:
result.append((url, path, self._download_one_url(
illust, url, path,
convert_ugoira=convert_ugoira,
replace=replace,
check_exists=check_exists,
max_tries=max_tries,
fake_download=fake_download,
use_pil=use_pil,
)))
return result
@_need_auth
def download(
self, illust, directory=os.path.curdir,
name=None, addition_naming_info=None,
convert_ugoira=True, replace=False,
check_exists=None, max_tries=5,
fake_download=False,
use_pil=False,
):
"""Download illust.
Download illust.
Args:
illust: The illust or illust_id to be downloaded.
directory: Directory.
name: If set, the downloaded file will be renamed. Can contain
format string syntax.
e.g. name='{illust.user_id}_{original_name}'
The following information is provided:
illust: The illust object.
page: 0-based page number.
original_name: The default filename.
root: The root part of original_name. e.g. 'foo' in
'foo.bar'.
ext: The extension part of original_name. e.g. '.bar' in
'foo.bar'.
addition_naming_info: Additional dict that will be used when
formatting name.
convert_ugoira: Whether to download ugoira as gif. If false, a zip
file will be downloaded instead, and a txt file containing the
frame durations will be created.
replace: If true, will replace already existing file(s).
check_exists: Additional path(s) to check whether the illust exists
(by name). Could be a path string, a list of path strings or
None.
max_tries: Max number of tries when a download fails. If
max_tries=None, it will retry indefinitely until finished.
fake_download: If True, no file will be actually downloaded.
use_pil: Whether to use the Pillow library to create the GIF file.
Refer to the doc of PixivPixie.convert_zip_to_gif().
Returns:
A list of download result of each page. Each result is a tuple of
(url, path, downloaded).
Raises:
Any exceptions check_auth() will raise.
DownloadError.
"""
if isinstance(illust, int):
illust = self.illust(illust)
if check_exists is None:
check_exists = []
elif isinstance(check_exists, str):
check_exists = [check_exists]
download_target = []
for tries in count(start=1):
try:
download_target = [
(
url,
self._get_file_path(
illust, page, url,
convert_ugoira,
directory, name,
addition_naming_info,
),
)
for page, url in enumerate(illust.image_urls)
]
break
except Exception as e:
if max_tries is None or tries < max_tries:
continue
raise DownloadError(illust, e) from e
return self._download_multiple_urls(
illust, download_target,
convert_ugoira=convert_ugoira,
replace=replace,
check_exists=check_exists,
max_tries=max_tries,
fake_download=fake_download,
use_pil=use_pil,
)
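# A minimal usage sketch (hypothetical account and illust id; not part of the
# original module): log in, fetch an illust and download it, converting ugoira
# to gif by default.
#
#   pixie = PixivPixie()
#   pixie.login('username', 'password')
#   illust = pixie.illust(59580629)
#   pixie.download(illust, directory='downloads',
#                  name='{illust.user_id}_{original_name}')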
|
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (c) 2017-2018 Taro Sato
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""MIME Streamer
================
"""
from __future__ import absolute_import
import logging
import re
from contextlib import contextmanager
from email.parser import HeaderParser
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
from .exceptions import NoPartError
from .exceptions import ParsingError
from .utils import ensure_binary
from .utils import ensure_str
log = logging.getLogger(__name__)
NL = b'\r\n'
"""byte: The new line byte(s) used to delimit lines"""
re_split_content_type = re.compile(br'(;|' + NL + b')')
def parse_content_type(text):
"""Parse out parameters from `content-type`.
Args:
text (str): The `content-type` text.
Returns:
dict: The parameters parsed out from `content-type`.
"""
items = re_split_content_type.split(ensure_binary(text))
d = {ensure_str('mime-type'): ensure_str(items.pop(0).lower())}
for item in items:
item = item.strip()
try:
idx = item.index(b'=')
k = ensure_str(item[:idx])
v = ensure_str(item[idx + 1:].strip(b'"'))
except Exception:
continue
d[k] = v
return d
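# Example (a rough sketch of the parsing above): the header value is split on
# ';'/CRLF, the first item becomes the 'mime-type' entry and the remaining
# key=value pairs are collected:
#
#   >>> parse_content_type('multipart/mixed; boundary="frontier"')
#   {'mime-type': 'multipart/mixed', 'boundary': 'frontier'}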
class Part(object):
"""A part constituting (multipart) message."""
def __init__(self, headers=None):
self._headers = headers or {}
self._content = None
@property
def content(self):
return self._content
@content.setter
def content(self, stream_content):
self._content = stream_content
@property
def headers(self):
return self._headers
def flush_content(self):
"""Read the entire stream for this part to ensure the cursor points to
the byte right after the end of the content of the part.
"""
chunk_size = 256
chunk = None
flushed = 0
try:
while chunk != b'':
chunk = self._content.read(chunk_size)
flushed += len(chunk)
except Exception:
log.exception('Error flushing part content')
raise
else:
if flushed:
log.debug('Flushed unread content of size %d bytes', flushed)
else:
log.debug('Part content was fully read before exit')
def get_multipart_boundary(self):
"""Get the sentinel string indicating multipart boundary if exists."""
if 'content-type' in self.headers:
# Try looking for boundary info in this header
pars = parse_content_type(self.headers['content-type'])
if pars['mime-type'].startswith('multipart/'):
return pars.get('boundary')
class StreamContent(object):
"""The iterator interface for reading content from a
:class:`MIMEStreamer` object.
Args:
streamer (:class:`MIMEStreamer`): The streamer object
representing the MIME content.
"""
def __init__(self, streamer):
self._streamer = streamer
# The buffer storing current line
self._buff = b''
# The character position last read from `_buff`
self._pos = 0
# Boolean flag of whether EOF/boundary has been seen or not
self._eof_seen = False
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
"""Read a byte from stream.
Returns:
str: A single byte from the stream.
Raises:
StopIteration: When EOF is reached.
"""
if self._eof_seen:
raise StopIteration
if self._pos + 1 >= len(self._buff):
# The cursor points past the current line in the buffer,
# so read in the new line
line = self._streamer.stream.readline()
log.debug('%r read: %r%s',
self, line[:76], '...' if len(line) > 76 else '')
if self._streamer._is_boundary(line):
log.debug('%r detected boundary', self)
self._streamer.stream.rollback_line()
self._eof_seen = True
raise StopIteration
elif line == b'':
self._eof_seen = True
raise StopIteration
self._buff = line
self._pos = 0
else:
self._pos += 1
return self._buff[self._pos:self._pos+1]
def read(self, n=-1):
"""Read at most `n` bytes, returned as string.
Args:
n (int, optional): If negative or omitted, read until EOF
or part boundary is reached. If positive, at most `n`
bytes will be returned.
Returns:
str: The bytes read from streamer.
"""
assert n != 0
buff = b''
# iter(int, 1) is a way to create an infinite loop
iterator = range(n) if n > 0 else iter(int, 1)
for i in iterator:
try:
c = next(self)
except StopIteration:
break
buff += c
return buff
class StreamIO(object):
"""Wrapper for file-like object exposing only readline-related
interface suitable for use with :class:`MIMEStreamer`.
Args:
stream (file): File-like object of byte stream.
"""
def __init__(self, stream):
self.stream = stream
# Points to the head position of the line just read by
# :meth:`StreamIO.readline`.
self._head_of_last_line = None
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
return self.readline()
def readline(self, length=None):
"""Read one entire line from the file.
Returns:
str: The line including the trailing newline.
"""
if length is not None:
raise NotImplementedError
self._head_of_last_line = self.stream.tell()
line = self.stream.readline()
if line == b'':
return line
while not line.endswith(NL):
s = self.stream.readline()
if s == b'':
break
line += s
return line
def rollback_line(self):
"""Move the file's position to the head of previous line already read
by :meth:`StreamIO.readline`.
"""
self.stream.seek(self._head_of_last_line)
def reaches_eof(self):
"""Test if the next line to be read reaches EOF."""
next_line = self.readline()
self.rollback_line()
return next_line.rstrip() == b''
class MIMEStreamer(object):
"""The generic MIME content streamer.
Args:
stream (`file`): The `file`-like object that reads from a
string buffer of content in the MIME format.
boundary (`str`, optional): The MIME part boundary text.
"""
def __init__(self, stream, boundary=None):
self.stream = self.init_stream_io(stream)
self._boundary = boundary or None
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def init_stream_io(self, stream):
return StreamIO(stream)
def _is_boundary(self, line):
"""Test if `line` is a part boundary."""
return self._boundary and line.startswith(b'--' + self._boundary)
@contextmanager
def get_next_part(self):
"""Get the next part. Use this with the context manager (i.e., `with`
statement).
"""
# Assume the cursor is at the first char of headers of a part
part = None
headers = []
while 1:
line = self.stream.readline()
log.debug('%r read: %r%s',
self, line[:76], '...' if len(line) > 76 else '')
if self._is_boundary(line):
# A boundary followed by an empty line indicates the
# end of response content
if self.stream.reaches_eof():
log.debug('Content ends')
break
continue
if part is None:
# Still reading headers
if line == b'':
raise ParsingError('EOF while reading headers')
if line != NL:
log.debug('%r read header line: %s', self, line[:-2])
headers.append(line)
continue
# This empty line separates headers and content in
# the current part
log.debug('End headers %r', headers)
headers = HeaderParser().parsestr(
ensure_str(b''.join(headers)))
log.debug('Parsed headers %r', list(headers.items()))
part = Part(headers)
if not self._boundary:
boundary = part.get_multipart_boundary()
if boundary:
log.debug('Found boundary from headers: %s', boundary)
self._boundary = ensure_binary(boundary)
# Probe the line following the headers/content delimiter
if self.stream.reaches_eof():
log.debug('EOF detected')
part.content = StringIO(b'')
else:
next_line = self.stream.readline()
if self._is_boundary(next_line):
log.debug('Content is empty for this part')
part.content = StringIO(b'')
else:
log.debug('Content ready for read')
self.stream.rollback_line()
part.content = StreamContent(self)
break
if part is None:
raise NoPartError('No more part to read')
try:
yield part
finally:
if part is not None:
part.flush_content()
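# A minimal usage sketch (hypothetical content; not part of the original
# module): stream a small multipart body and read one part.
#
#   from io import BytesIO
#   raw = (b'--frontier\r\n'
#          b'Content-Type: text/plain\r\n'
#          b'\r\n'
#          b'hello world\r\n'
#          b'--frontier--\r\n')
#   streamer = MIMEStreamer(BytesIO(raw), boundary=b'frontier')
#   with streamer.get_next_part() as part:
#       print(part.headers['content-type'])   # -> text/plain
#       print(part.content.read())            # -> b'hello world\r\n'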
|
|
# Copyright (C) 2005 Graham Ashton <[email protected]>
#
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
# $Id: netsyslog.py,v 1.9 2005/11/22 16:35:40 ashtong Exp $
#
# Slightly modified by Xiaming Chen, 2015/05/02.
"""
Syslog module of reSyslog
The format of the UDP packets sent by netsyslog adheres closely to
that defined in U{RFC 3164<http://www.ietf.org/rfc/rfc3164.txt>}. Much
of the terminology used in the RFC has been incorporated into the
names of the classes and properties and is used throughout this
documentation.
"""
import os
import socket
import sys
import time
class Facility:
"""Syslog facilities"""
KERN, USER, MAIL, DAEMON, AUTH, SYSLOG, \
LPR, NEWS, UUCP, CRON, AUTHPRIV, FTP = range(12)
LOCAL0, LOCAL1, LOCAL2, LOCAL3, \
LOCAL4, LOCAL5, LOCAL6, LOCAL7 = range(16, 24)
class Level:
"""Syslog levels"""
EMERG, ALERT, CRIT, ERR, \
WARNING, NOTICE, INFO, DEBUG = range(8)
class PRI(object):
"""The PRI part of the packet.
Though never printed in the output from a syslog server, the PRI
part is a crucial part of the packet. It encodes both the facility
and severity of the packet, both of which are defined in terms of
numerical constants from the standard syslog module.
See Section 4.1.1 of RFC 3164 for details.
"""
def __init__(self, facility, severity):
"""Initialise the object, specifying facility and severity.
Specify the arguments using constants from the syslog module
(e.g. syslog.LOG_USER, syslog.LOG_INFO).
"""
assert facility is not None
assert severity is not None
self.facility = facility
self.severity = severity
def __str__(self):
value = self.facility + self.severity
return "<%s>" % value
class HEADER(object):
"""The HEADER part of the message.
The HEADER contains a timestamp and a hostname. It is the first
component of the log message that is displayed in the output of
syslog.
See Section 4.1.2 of RFC 3164 for details.
"""
def __init__(self, timestamp=None, hostname=None):
"""Initialise the object, specifying timestamp and hostname.
The timestamp represents the local time when the log message
was created. If the timestamp is not set the current local
time will be used. See the L{HEADER.timestamp} property
for a note on the format.
The hostname should be set to the hostname of the computer
that originally generated the log message. If the hostname is
not set the hostname of the local computer will be used. See
the L{HEADER.hostname} property for a note on the format.
"""
self.timestamp = timestamp
self.hostname = hostname
def __str__(self):
return "%s %s" % (self.timestamp, self.hostname)
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, value):
"""
The local time when the message was written.
Must follow the format 'Mmm DD HH:MM:SS'. If
the day of the month is less than 10, then it
MUST be represented as a space and then the
number.
"""
if not self._timestamp_is_valid(value):
value = self._calculate_current_timestamp()
self._timestamp = value
def _calculate_current_timestamp(self):
localtime = time.localtime()
day = time.strftime("%d", localtime)
if day[0] == "0":
day = " " + day[1:]
value = time.strftime("%b %%s %H:%M:%S", localtime)
return value % day
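    # e.g. for May 2nd this yields "May  2 13:07:09": the single-digit day is
    # space-padded, as required by RFC 3164 section 4.1.2.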
def _timestamp_is_valid(self, value):
if value is None:
return False
for char in value:
if ord(char) < 32 or ord(char) > 126:
return False
return True
@property
def hostname(self):
return self._hostname
@hostname.setter
def hostname(self, value):
"""
The hostname where the log message was created.
Should be the first part of the hostname, or
an IP address. Should NOT be set to a fully
qualified domain name.
"""
if value is None:
value = socket.gethostname()
self._hostname = value
class MSG(object):
"""Represents the MSG part of a syslog packet.
The MSG part of the packet consists of the TAG and CONTENT. The
TAG and the CONTENT fields must be separated by a non-alphanumeric
character. Unless you ensure that the CONTENT field begins with
such a character, a separator of a colon and space will be inserted
between them when the C{MSG} object is converted into a UDP
packet.
See Section 4.1.3 of RFC 3164 for details.
"""
MAX_TAG_LEN = 32
def __init__(self, tag=None, content="", pid=None):
"""Initialise the object, specifying tag and content.
See the documentation for the L{MSG.tag} and
L{MSG.content} properties for further documentation.
If the pid is set it will be prepended to the content in
square brackets when the packet is created.
"""
self.tag = tag
self.content = content
self.pid = pid
def __str__(self):
content = self.content
if self.pid is not None:
content = "[%s]" % self.pid + content
return self.tag + content
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, value):
"""The name of the program that generated the log message.
The tag can only contain alphanumeric
characters. If the tag is longer than {MAX_TAG_LEN} characters
it will be truncated automatically.
"""
if value is None:
value = sys.argv[0]
self._tag = value[:self.MAX_TAG_LEN]
@property
def content(self):
return self._content
@content.setter
def content(self, value):
"""The main component of the log message.
The content field is a freeform field that
often begins with the process ID (pid) of the
program that created the message.
"""
value = self._prepend_seperator(value)
self._content = value
def _prepend_seperator(self, value):
try:
first_char = value[0]
except IndexError:
pass
else:
if first_char.isalnum():
value = ": " + value
return value
class Packet(object):
"""Combines the PRI, HEADER and MSG into a packet.
If the packet is longer than L{MAX_LEN} bytes in length it is
automatically truncated prior to sending; any extraneous bytes are
lost.
"""
MAX_LEN = 1024
def __init__(self, pri, header, msg):
"""Initialise the object.
The three arguments must be instances of the L{PRI},
L{HEADER} and L{MSG} classes.
"""
self.pri = pri
self.header = header
self.msg = msg
def __str__(self):
message = "%s%s %s" % (self.pri, self.header, self.msg)
return message[:self.MAX_LEN]
class Syslog(object):
"""Send log messages to syslog servers.
The Syslog class provides two different methods for sending log
messages. The first approach (the L{log} method) is suitable for
creating new log messages from within a normal application. The
second (the L{send_packet} method) is designed for use in
circumstances where you need full control over the contents of
the syslog packet.
"""
PORT = 514
def __init__(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._hostnames = {}
def add_host(self, hostname):
"""Add hostname to the list of hosts that will receive packets.
Can be a hostname or an IP address. Note that if the hostname
cannot be resolved calls to L{log} or L{send_packet} will take
a long time to return.
"""
self._hostnames[hostname] = 1
def remove_host(self, hostname):
"""Remove hostname from the list of hosts that will receive packets."""
del self._hostnames[hostname]
def host_number(self):
return len(self._hostnames)
def _send_packet_to_hosts(self, packet):
for hostname in self._hostnames:
host = hostname
port = self.PORT
if ':' in hostname:
host, port = hostname.split(':')
self._sock.sendto(str(packet), (host, int(port)))
def log(self, facility, level, text, pid=False):
"""Send the message text to all registered hosts.
The facility and level will be used to create the packet's PRI
part. The HEADER will be automatically determined from the
current time and hostname. The MSG will be set from the
running program's name and the text parameter.
This is the simplest way to use reSyslog.Syslog, creating log
messages containing the current time, hostname, program name,
etc. This is how you do it::
logger = syslog.Syslog()
logger.add_host("localhost")
logger.log(Facility.USER, Level.INFO, "Hello World")
If pid is True the process ID will be prepended to the text
parameter, enclosed in square brackets and followed by a
colon.
"""
pri = PRI(facility, level)
header = HEADER()
if pid:
msg = MSG(content=text, pid=os.getpid())
else:
msg = MSG(content=text)
packet = Packet(pri, header, msg)
self._send_packet_to_hosts(packet)
def send_packet(self, packet):
"""Send a L{Packet} object to all registered hosts.
This method requires more effort than L{log} as you need to
construct your own L{Packet} object beforehand, but it does
give you full control over the contents of the packet::
pri = syslog.PRI(Facility.USER, Level.INFO)
header = syslog.HEADER("Jun 1 18:34:03", "myhost")
msg = syslog.MSG("myprog", "Hello World", mypid)
packet = syslog.Packet(pri, header, msg)
logger = syslog.Syslog()
logger.add_host("localhost")
logger.send_packet(packet)
"""
self._send_packet_to_hosts(packet)
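# A minimal sketch (not part of the original module): build a packet by hand
# and inspect the rendered string without sending it anywhere.
#
#   pri = PRI(Facility.USER, Level.INFO)
#   header = HEADER(timestamp="Jun  1 18:34:03", hostname="myhost")
#   msg = MSG(tag="myprog", content="Hello World")
#   print(Packet(pri, header, msg))
#   # -> <7>Jun  1 18:34:03 myhost myprog: Hello World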
|
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import csv
import numpy
import pandas
from itertools import izip
from StringIO import StringIO
from nose import SkipTest
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from distributions.fileutil import tempdir
from distributions.io.stream import json_load
from distributions.io.stream import open_compressed
from distributions.io.stream import protobuf_stream_load
from distributions.tests.util import assert_close
import loom.preql
from loom.format import load_encoder
from loom.test.util import CLEANUP_ON_ERROR
from loom.test.util import for_each_dataset
from loom.test.util import load_rows_csv
COUNT = 10
def make_fully_observed_row(rows_csv):
rows = iter(load_rows_csv(rows_csv))
header = rows.next()
try:
id_pos = header.index('_id')
except ValueError:
id_pos = None
dense_row = ['' for _ in header]
for row in rows:
if not any(condition == '' for condition in dense_row):
if id_pos is not None:
dense_row.pop(id_pos)
return dense_row
for i, (condition, x) in enumerate(izip(dense_row, row)):
if condition == '':
dense_row[i] = x
raise SkipTest('no dense row could be constructed')
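# The helper above merges values column-by-column across rows until a row with
# no missing ('') entries can be assembled; the tests below use it as a fully
# observed conditioning row.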
def _check_predictions(rows_in, result_out, encoding):
encoders = json_load(encoding)
name_to_encoder = {e['name']: load_encoder(e) for e in encoders}
with open_compressed(rows_in, 'rb') as fin:
with open(result_out, 'r') as fout:
in_reader = csv.reader(fin)
out_reader = csv.reader(fout)
fnames = in_reader.next()
out_reader.next()
for in_row in in_reader:
for i in range(COUNT):
out_row = out_reader.next()
bundle = zip(fnames, in_row, out_row)
for name, in_val, out_val in bundle:
if name == '_id':
assert_equal(in_val, out_val)
continue
encode = name_to_encoder[name]
observed = bool(in_val.strip())
if observed:
assert_almost_equal(
encode(in_val),
encode(out_val))
else:
assert_true(bool(out_val.strip()))
@for_each_dataset
def test_predict(root, rows_csv, encoding, **unused):
with tempdir(cleanup_on_error=CLEANUP_ON_ERROR):
with loom.preql.get_server(root, debug=True) as preql:
result_out = 'predictions_out.csv'
rows_in = os.listdir(rows_csv)[0]
rows_in = os.path.join(rows_csv, rows_in)
preql.predict(rows_in, COUNT, result_out, id_offset=True)
print 'DEBUG', open_compressed(rows_in).read()
print 'DEBUG', open_compressed(result_out).read()
_check_predictions(rows_in, result_out, encoding)
@for_each_dataset
def test_predict_pandas(root, rows_csv, schema, **unused):
feature_count = len(json_load(schema))
with loom.preql.get_server(root, debug=True) as preql:
rows_filename = os.path.join(rows_csv, os.listdir(rows_csv)[0])
with open_compressed(rows_filename) as f:
rows_df = pandas.read_csv(
f,
converters=preql.converters,
index_col='_id')
print 'rows_df ='
print rows_df
row_count = rows_df.shape[0]
assert_equal(rows_df.shape[1], feature_count)
rows_io = StringIO(rows_df.to_csv())
result_string = preql.predict(rows_io, COUNT, id_offset=True)
result_df = pandas.read_csv(StringIO(result_string), index_col=False)
print 'result_df ='
print result_df
assert_equal(result_df.ndim, 2)
assert_equal(result_df.shape[0], row_count * COUNT)
assert_equal(result_df.shape[1], 1 + feature_count)
@for_each_dataset
def test_relate(root, **unused):
with tempdir(cleanup_on_error=CLEANUP_ON_ERROR):
with loom.preql.get_server(root, debug=True) as preql:
result_out = 'related_out.csv'
preql.relate(preql.feature_names, result_out, sample_count=10)
with open(result_out, 'r') as f:
reader = csv.reader(f)
header = reader.next()
columns = header[1:]
assert_equal(columns, preql.feature_names)
zmatrix = numpy.zeros((len(columns), len(columns)))
for i, row in enumerate(reader):
column = row.pop(0)
assert_equal(column, preql.feature_names[i])
for j, score in enumerate(row):
score = float(score)
zmatrix[i][j] = score
assert_close(zmatrix, zmatrix.T)
@for_each_dataset
def test_relate_pandas(root, rows_csv, schema, **unused):
feature_count = len(json_load(schema))
with loom.preql.get_server(root, debug=True) as preql:
result_string = preql.relate(preql.feature_names)
result_df = pandas.read_csv(StringIO(result_string), index_col=0)
print 'result_df ='
print result_df
assert_equal(result_df.ndim, 2)
assert_equal(result_df.shape[0], feature_count)
assert_equal(result_df.shape[1], feature_count)
@for_each_dataset
def test_refine_with_conditions(root, rows_csv, **unused):
with loom.preql.get_server(root, debug=True) as preql:
features = preql.feature_names
conditions = make_fully_observed_row(rows_csv)
preql.refine(
target_feature_sets=None,
query_feature_sets=None,
conditioning_row=None)
target_feature_sets = [
[features[0], features[1]],
[features[2]]]
query_feature_sets = [
[features[0], features[1]],
[features[2]],
[features[3]]]
assert_raises(
ValueError,
preql.refine,
target_feature_sets,
query_feature_sets,
conditions)
conditions[0] = None
assert_raises(
ValueError,
preql.refine,
target_feature_sets,
query_feature_sets,
conditions)
conditions[1] = None
conditions[2] = None
conditions[3] = None
preql.refine(
target_feature_sets,
query_feature_sets,
conditions)
@for_each_dataset
def test_refine_shape(root, encoding, **unused):
with loom.preql.get_server(root, debug=True) as preql:
features = preql.feature_names
target_sets = [
features[2 * i: 2 * (i + 1)]
for i in xrange(len(features) / 2)
]
query_sets = [
features[2 * i: 2 * (i + 1)]
for i in xrange(len(features) / 2)
]
result = preql.refine(target_sets, query_sets, sample_count=10)
reader = csv.reader(StringIO(result))
header = reader.next()
header.pop(0)
assert_equal(header, map(min, query_sets))
for row, target_set in izip(reader, target_sets):
label = row.pop(0)
assert_equal(label, min(target_set))
assert_equal(len(row), len(query_sets))
@for_each_dataset
def test_support_with_conditions(root, rows_csv, **unused):
with loom.preql.get_server(root, debug=True) as preql:
features = preql.feature_names
conditions = make_fully_observed_row(rows_csv)
target_feature_sets = [
[features[0], features[1]],
[features[2]]]
observed_feature_sets = [
[features[0], features[1]],
[features[2]],
[features[3]]]
preql.support(
target_feature_sets,
observed_feature_sets,
conditions)
conditions[5] = None
preql.support(
target_feature_sets,
observed_feature_sets,
conditions)
conditions[0] = None
assert_raises(
ValueError,
preql.support,
target_feature_sets,
observed_feature_sets,
conditions)
@for_each_dataset
def test_support_shape(root, rows_csv, **unused):
with loom.preql.get_server(root, debug=True) as preql:
features = preql.feature_names
conditioning_row = make_fully_observed_row(rows_csv)
target_sets = [
features[2 * i: 2 * (i + 1)]
for i in xrange(len(features) / 2)
]
observed_sets = [
features[2 * i: 2 * (i + 1)]
for i in xrange(len(features) / 2)
]
result = preql.support(
target_sets,
observed_sets,
conditioning_row,
sample_count=10)
reader = csv.reader(StringIO(result))
header = reader.next()
header.pop(0)
assert_equal(header, map(min, observed_sets))
for row, target_set in izip(reader, target_sets):
label = row.pop(0)
assert_equal(label, min(target_set))
assert_equal(len(row), len(observed_sets))
@for_each_dataset
def test_group_runs(root, schema, encoding, **unused):
with tempdir(cleanup_on_error=CLEANUP_ON_ERROR):
with loom.preql.get_server(root, encoding, debug=True) as preql:
test_columns = json_load(schema).keys()[:10]
for column in test_columns:
groupings_csv = 'group.{}.csv'.format(column)
preql.group(column, result_out=groupings_csv)
print open(groupings_csv).read()
@for_each_dataset
def test_group_pandas(root, rows_csv, rows, **unused):
row_count = sum(1 for _ in protobuf_stream_load(rows))
with loom.preql.get_server(root, debug=True) as preql:
feature_names = preql.feature_names
for feature in feature_names[:10]:
result_string = preql.group(feature)
result_df = pandas.read_csv(StringIO(result_string), index_col=0)
print 'result_df ='
print result_df
assert_equal(result_df.ndim, 2)
assert_equal(result_df.shape[0], row_count)
assert_equal(result_df.shape[1], 2)
@for_each_dataset
def test_search_runs(root, rows_csv, **unused):
rows = load_rows_csv(rows_csv)
header = rows.pop(0)
try:
id_pos = header.index('_id')
except ValueError:
id_pos = None
rows = rows[0:10]
with tempdir(cleanup_on_error=CLEANUP_ON_ERROR):
with loom.preql.get_server(root, debug=True) as preql:
for i, row in enumerate(rows):
row.pop(id_pos)
search_csv = 'search.{}.csv'.format(i)
preql.search(row, result_out=search_csv)
open(search_csv).read()
@for_each_dataset
def test_similar_runs(root, rows_csv, **unused):
rows = load_rows_csv(rows_csv)
header = rows.pop(0)
try:
id_pos = header.index('_id')
except ValueError:
id_pos = None
rows = rows[0:10]
for row in rows:
row.pop(id_pos)
with tempdir(cleanup_on_error=CLEANUP_ON_ERROR):
with loom.preql.get_server(root, debug=True) as preql:
search_csv = 'search.csv'
preql.similar(rows, result_out=search_csv)
|
|
"""
This file is part of pyS5p
https://github.com/rmvanhees/pys5p.git
The class S5Pmsm reads HDF5 measurement data including its attributes and
dimensions. Initialization:
S5Pmsm attribute | hdf5 dataset | Numpy array
-------------------------------------------------------------------------
name | h5_dset.name | 'value'
value | h5_dset.value['value'] | np.squeeze(data)
| or h5_dset.value |
error | h5_dset.value['error'] | None
| or None |
coords | h5_dset.dims | [[['time',] 'row',] 'column']
units | attrs['units'] | None
long_name | attrs['long_name'] | ''
fillvalue | h5_dset.fillvalue | None
coverage | None | None
Limited to 3 dimensions
Copyright (c) 2017-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from collections import namedtuple
from copy import deepcopy
from pathlib import PurePath
from h5py import Dataset
import numpy as np
from .biweight import biweight
# - local functions --------------------------------
def pad_rows(arr1, arr2):
"""
Pad the array with the smaller number of rows with NaN's
"""
if arr2.ndim == 2:
if arr1.shape[0] < arr2.shape[0]:
buff = arr1.copy()
arr1 = np.full_like(arr2, np.nan)
arr1[0:buff.shape[0], :] = buff
elif arr1.shape[0] > arr2.shape[0]:
buff = arr2.copy()
arr2 = np.full_like(arr1, np.nan)
arr2[0:buff.shape[0], :] = buff
else:
if arr1.shape[1] < arr2.shape[1]:
buff = arr1.copy()
arr1 = np.full_like(arr2, np.nan)
arr1[:, 0:buff.shape[1], :] = buff
elif arr1.shape[1] > arr2.shape[1]:
buff = arr2.copy()
arr2 = np.full_like(arr1, np.nan)
arr2[:, 0:buff.shape[1], :] = buff
return (arr1, arr2)
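# Example (a small sketch of the helper above): the array with fewer rows is
# padded with NaN rows so both arrays end up with the same shape.
#
#   a, b = pad_rows(np.zeros((2, 4)), np.zeros((3, 4)))
#   # a.shape == b.shape == (3, 4); a[2] is all NaN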
# - class definition -------------------------------
class S5Pmsm():
"""
Definition of class S5Pmsm which contains the data of a HDF5 dataset,
including its attributes (CF convention)
Attributes
----------
name : string
Name of the original HDF5 dataset
value : ndarray
Dataset values
error : ndarray, optional
Dataset uncertainties or error estimates
coords : OrderedDict
Dataset coordinates (HDF5/netCDF4 dimensions)
coverage : string, optional
Dataset coverage start and end
units : string
Data units
long_name : string
Dataset long name
fillvalue : float
Value of undefined or missing data values
Methods
-------
copy()
Return a deep copy of the current object.
set_coords(coords_data, coords_name=None)
Set coordinates of data.
set_coverage(coverage, force=False)
Set the coverage attribute, as (coverageStart, coverageEnd),
set_units(units, force=False)
Set the units attribute.
set_fillvalue()
Set fillvalue of floating-point data.
set_long_name(name, force=False)
Set the long_name attribute.
fill_as_nan()
Replace missing floating-point data with NaN's.
sort(axis=0)
Sort dataset according to a coordinate axis.
concatenate(msm, axis=0)
Concatenate the data of a S5Pmsm to the current S5Pmsm object.
nanpercentile(vperc, data_sel=None, axis=0, keepdims=False)
Returns percentile(s) of the data in the S5Pmsm object.
biweight(data_sel=None, axis=0, keepdims=False)
Returns biweight median of the data in the S5Pmsm object.
nanmedian(data_sel=None, axis=0, keepdims=False)
Returns S5Pmsm object containing median & standard deviation of the
original data.
nanmean(data_sel=None, axis=0, keepdims=False)
Returns S5Pmsm object containing mean & standard deviation of the
original data.
transpose()
Transpose data and coordinates of an S5Pmsm object.
Notes
-----
Examples
--------
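A minimal sketch (hypothetical HDF5 file handle `fid` and dataset path):

>>> msm = S5Pmsm(fid['/BAND7/signal'], data_sel=np.s_[0, :, :])
>>> msm.set_units('electrons / s')
>>> msm.fill_as_nan()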
"""
def __init__(self, dset, data_sel=None, datapoint=False):
"""
Read measurement data from a Tropomi OCAL, ICM, or L1B product
Parameters
----------
dset : h5py.Dataset or ndarray
h5py dataset from which the data is read, data is used to
initialize S5Pmsm object
data_sel : numpy slice
a numpy slice generated by, for example, numpy.s_
datapoint : boolean
to indicate that the dataset is a compound of type datapoint
Returns
-------
numpy structure with dataset data and attributes, including data,
fillvalue, coordinates, units, ...
"""
# initialize object
self.name = 'value'
self.value = None
self.error = None
self.coords = None
self.coverage = None
self.units = None
self.long_name = ''
self.fillvalue = None
if isinstance(dset, Dataset):
self.__from_h5_dset(dset, data_sel, datapoint)
else:
self.__from_ndarray(dset, data_sel)
def __repr__(self) -> str:
res = []
for key, value in self.__dict__.items():
if key.startswith('__'):
continue
if isinstance(value, np.ndarray):
res.append(f'{key}: {value.shape}')
else:
res.append(f'{key}: {value}')
return '\n'.join(res)
def __from_h5_dset(self, h5_dset, data_sel, datapoint):
"""
initialize S5Pmsm object from h5py dataset
"""
self.name = PurePath(h5_dset.name).name
# copy dataset values (and error) to object
if data_sel is None:
if datapoint:
self.value = h5_dset['value']
self.error = h5_dset['error']
else:
self.value = h5_dset[...]
else:
# we need to keep all dimensions to get the dimensions
# of the output data right
if datapoint:
self.value = h5_dset['value'][data_sel]
self.error = h5_dset['error'][data_sel]
if isinstance(data_sel, tuple):
for ii, elmnt in enumerate(data_sel):
if isinstance(elmnt, (int, np.int64)):
self.value = np.expand_dims(self.value, axis=ii)
self.error = np.expand_dims(self.error, axis=ii)
else:
self.value = h5_dset[data_sel]
if isinstance(data_sel, tuple):
for ii, elmnt in enumerate(data_sel):
if isinstance(elmnt, (int, np.int64)):
self.value = np.expand_dims(self.value, axis=ii)
# set default dimension names
if h5_dset.ndim == 1:
keys_default = ['column']
elif h5_dset.ndim == 2:
keys_default = ['row', 'column']
elif h5_dset.ndim == 3:
keys_default = ['time', 'row', 'column']
else:
raise ValueError('not implemented for ndim > 3')
# copy all dimensions with size greater than 1
keys = []
dims = []
for ii in range(h5_dset.ndim):
if self.value.shape[ii] == 1:
continue
if len(h5_dset.dims[ii]) != 1: # bug in some KNMI HDF5 files
keys.append(keys_default[ii])
dims.append(np.arange(self.value.shape[ii]))
elif self.value.shape[ii] == h5_dset.shape[ii]:
buff = PurePath(h5_dset.dims[ii][0].name).name
if len(buff.split()) > 1:
buff = buff.split()[0]
keys.append(buff)
if h5_dset.dims[ii][0][:].size == h5_dset.shape[ii]:
buff = h5_dset.dims[ii][0][:]
if np.all(buff == 0):
buff = np.arange(buff.size)
else: # bug in some KNMI HDF5 files
buff = np.arange(h5_dset.shape[ii])
dims.append(buff)
else:
buff = PurePath(h5_dset.dims[ii][0].name).name
if len(buff.split()) > 1:
buff = buff.split()[0]
keys.append(buff)
if h5_dset.dims[ii][0][:].size == h5_dset.shape[ii]:
buff = h5_dset.dims[ii][0][:]
if np.all(buff == 0):
buff = np.arange(buff.size)
else: # bug in some KNMI HDF5 files
buff = np.arange(h5_dset.shape[ii])
if isinstance(data_sel, slice):
dims.append(buff[data_sel])
elif len(data_sel) == h5_dset.ndim:
dims.append(buff[data_sel[ii]])
elif not isinstance(data_sel, tuple):
dims.append(buff[data_sel])
elif ii > len(data_sel):
dims.append(buff[data_sel[-1]])
else:
dims.append(buff[data_sel[ii]])
# add dimensions as a namedtuple
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
# remove all dimensions with size equal 1 from value (and error)
self.value = np.squeeze(self.value)
if datapoint:
self.error = np.squeeze(self.error)
# copy FillValue (same for value/error in a datapoint)
if datapoint:
self.fillvalue = h5_dset.fillvalue[0]
else:
self.fillvalue = h5_dset.fillvalue
# copy its units
if 'units' in h5_dset.attrs:
if isinstance(h5_dset.attrs['units'], np.ndarray):
if h5_dset.attrs['units'].size == 1:
self.units = h5_dset.attrs['units'][0]
if isinstance(self.units, bytes):
self.units = self.units.decode('ascii')
else:
self.units = h5_dset.attrs['units']
if isinstance(self.units[0], bytes):
self.units = self.units.astype(str)
else:
self.units = h5_dset.attrs['units']
if isinstance(self.units, bytes):
self.units = self.units.decode('ascii')
# copy its long_name
if 'long_name' in h5_dset.attrs:
if isinstance(h5_dset.attrs['long_name'], bytes):
self.long_name = h5_dset.attrs['long_name'].decode('ascii')
else:
self.long_name = h5_dset.attrs['long_name']
def __from_ndarray(self, data, data_sel):
"""
initialize S5Pmsm object from a ndarray
"""
# copy dataset values (and error) to object
if data_sel is None:
self.value = np.squeeze(data)
else:
self.value = np.squeeze(data[data_sel])
# define coordinates
dims = [np.arange(sz) for sz in self.value.shape]
try:
self.set_coords(dims, coords_name=None)
except Exception as exc:
raise RuntimeError('failed to set the coordinates') from exc
def copy(self):
"""
return a deep copy of the current object
"""
return deepcopy(self)
def set_coords(self, coords_data, coords_name=None):
"""
Set coordinates of data
Parameters
----------
coords_data : list of ndarrays
list with coordinates data for each dimension
coords_name : list of strings
list with the names of each dimension
"""
if coords_name is None:
if len(coords_data) == 1:
keys = ['column']
elif len(coords_data) == 2:
keys = ['row', 'column']
elif len(coords_data) == 3:
keys = ['time', 'row', 'column']
else:
raise ValueError('not implemented for ndim > 3')
else:
if isinstance(coords_name, str):
keys = [coords_name]
else:
keys = coords_name
# add dimensions as a namedtuple
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(coords_data)
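# Illustrative sketch (hypothetical values, not from the original source):
# for a 2-D msm.value of shape (256, 1000) the coordinates could be set with
#     msm.set_coords([np.arange(256), np.arange(1000)],
#                    coords_name=['row', 'column'])
# after which they are available as msm.coords.row and msm.coords.column.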
def set_coverage(self, coverage, force=False):
"""
Set the coverage attribute as (coverageStart, coverageEnd).
Both elements are expected to be datetime objects.
Overwrite only when force is true.
"""
if self.coverage is None or force:
self.coverage = coverage
def set_units(self, units, force=False):
"""
Set the units attribute, overwrite when force is true
"""
if self.units is None or force:
self.units = units
def set_fillvalue(self):
"""
Set fillvalue to KNMI undefined
"""
if np.issubdtype(self.value.dtype, np.floating):
if self.fillvalue is None or self.fillvalue == 0.:
self.fillvalue = float.fromhex('0x1.ep+122')
def set_long_name(self, name, force=False):
"""
Set the long_name attribute, overwrite when force is true
"""
if force or not self.long_name:
self.long_name = name
def fill_as_nan(self):
"""
Replace fill values in the data with NaN.
Works only on datasets with HDF5 datatype 'float' or 'datapoints'.
"""
if self.fillvalue == float.fromhex('0x1.ep+122'):
self.value[(self.value == self.fillvalue)] = np.nan
if self.error is not None:
self.error[(self.error == self.fillvalue)] = np.nan
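# Usage sketch (assumes a floating-point dataset loaded into an S5Pmsm
# instance `msm`):
#     msm.set_fillvalue()     # ensure the KNMI "undefined" value is set
#     msm.fill_as_nan()       # replace fill values by NaN in value (and error)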
def sort(self, axis=0):
"""
Sort data and its coordinate along a given axis
Parameters
----------
axis : int, optional
axis for which the array will be sorted. Default is 0.
"""
if not isinstance(axis, int):
raise TypeError('axis not an integer')
if not 0 <= axis < self.value.ndim:
raise ValueError('axis out-of-range')
indx = np.argsort(self.coords[axis][:])
self.coords[axis][:] = self.coords[axis][indx]
if axis == 0:
self.value = self.value[indx, ...]
if self.error is not None:
if isinstance(self.error, list):
self.error = (self.error[0][indx, ...],
self.error[1][indx, ...])
else:
self.error = self.error[indx, ...]
elif axis == 1:
self.value = self.value[:, indx, ...]
if self.error is not None:
if isinstance(self.error, list):
self.error = (self.error[0][:, indx, :],
self.error[1][:, indx, :])
else:
self.error = self.error[:, indx, :]
elif axis == 2:
self.value = self.value[:, :, indx]
if self.error is not None:
if isinstance(self.error, list):
self.error = (self.error[0][:, :, indx],
self.error[1][:, :, indx])
else:
self.error = self.error[:, :, indx]
else:
raise ValueError("S5Pmsm: implemented for ndim <= 3")
def concatenate(self, msm, axis=0):
"""
Concatenate two measurement datasets, the current with another.
Parameters
----------
msm : pys5p.S5Pmsm
an S5Pmsm object
axis : int, optional
The axis for which the array will be joined. Default is 0.
Returns
-------
The data of the other dataset is concatenated to the existing data along
the given axis, and the affected coordinate is extended; returns self.
Note:
- The arrays must have the same shape, except in the dimension
corresponding to axis (the first, by default).
"""
if self.name != PurePath(msm.name).name:
raise TypeError('combining dataset with different name')
if self.error is None and msm.error is None:
datapoint = False
elif self.error is not None and msm.error is not None:
datapoint = True
else:
raise RuntimeError("S5Pmsm: combining non-datapoint and datapoint")
# all but the last 2 dimensions have to be equal
if self.value.shape[:-2] != msm.value.shape[:-2]:
raise TypeError('all but the last 2 dimensions should be equal')
if axis == 0:
self.value = np.concatenate((self.value, msm.value), axis=axis)
if datapoint:
self.error = np.concatenate((self.error, msm.error),
axis=axis)
elif axis == 1:
if self.value.shape[0] == msm.value.shape[0]:
self.value = np.concatenate((self.value, msm.value), axis=axis)
if datapoint:
self.error = np.concatenate((self.error, msm.error),
axis=axis)
else:
self.value = np.concatenate(pad_rows(self.value, msm.value),
axis=axis)
if datapoint:
self.error = np.concatenate(
pad_rows(self.error, msm.error), axis=axis)
elif axis == 2:
if self.value.shape[1] == msm.value.shape[1]:
self.value = np.concatenate((self.value, msm.value), axis=axis)
if datapoint:
self.error = np.concatenate((self.error, msm.error),
axis=axis)
else:
self.value = np.concatenate(pad_rows(self.value, msm.value),
axis=axis)
if datapoint:
self.error = np.concatenate(
pad_rows(self.error, msm.error), axis=axis)
else:
raise ValueError("S5Pmsm: implemented for ndim <= 3")
# now extend the coordinate along the concatenation axis
key = self.coords._fields[axis]
if msm.coords[axis][0] == 0:
dims = np.concatenate((self.coords[axis],
len(self.coords[axis]) + msm.coords[axis]))
else:
dims = np.concatenate((self.coords[axis], msm.coords[axis]))
self.coords = self.coords._replace(**{key: dims})
return self
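# Usage sketch (hypothetical names): join two orbits of the same dataset
# along the time axis with
#     msm = msm_orbit1.concatenate(msm_orbit2, axis=0)
# the time coordinate is extended, and for axis=1 or axis=2 the arrays are
# padded via pad_rows when their row counts differ.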
def nanpercentile(self, vperc, data_sel=None, axis=0, keepdims=False):
"""
Returns percentile(s) of the data in the S5Pmsm
Parameters
----------
vperc : int or list of int
percentile value(s) to compute, in the range [0, 100]. Supply one,
two or three percentiles (see Returns below).
data_sel : numpy slice
A numpy selection generated, for example, with numpy.s_. Can be used to
skip the first and/or last frame.
axis : int, optional
Axis or axes along which the percentiles are computed. Default is 0.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
Returns
-------
S5Pmsm object with the original data replaced by the percentiles along
one of the axes, see below. The coordinates are adjusted accordingly.
You should supply at least one percentile and at most three.
If vperc is an 'int' or len(vperc) == 1:
'value' is replaced by its (nan-)percentile vperc
'error' is unchanged
If len(vperc) == 2:
'vperc' is sorted
'value' is replaced by its (nan-)median
'error' is replaced by percentile('value', (vperc[0], vperc[1]))
If len(vperc) == 3:
'vperc' is sorted
'value' is replaced by percentile('value', vperc[1])
'error' is replaced by percentile('value', (vperc[0], vperc[2]))
"""
if isinstance(axis, int):
axis = (axis,)
if isinstance(vperc, int):
vperc = (vperc,)
else:
if len(vperc) == 2:
vperc += (50,)
# make sure that the values are sorted
vperc = tuple(sorted(vperc))
if len(vperc) != 1 and len(vperc) != 3:
raise TypeError('dimension vperc must be 1 or 3')
if data_sel is None:
if self.value.size <= 1 or self.value.ndim <= max(axis):
return self
perc = np.nanpercentile(self.value, vperc,
axis=axis, keepdims=keepdims)
else:
if self.value[data_sel].size <= 1 \
or self.value[data_sel].ndim <= max(axis):
return self
perc = np.nanpercentile(self.value[data_sel], vperc,
axis=axis, keepdims=keepdims)
if len(vperc) == 3:
self.value = perc[1, ...]
self.error = [perc[0, ...], perc[2, ...]]
else:
self.value = perc[0, ...]
# adjust the coordinates
if keepdims:
key = self.coords._fields[axis]
if self.coords[axis][0] == 0:
dims = [0]
else:
dims = np.median(self.coords[axis], keepdims=keepdims)
self.coords = self.coords._replace(**{key: dims})
else:
keys = []
dims = []
for ii in range(self.value.ndim+len(axis)):
if ii not in axis:
keys.append(self.coords._fields[ii])
dims.append(self.coords[ii][:])
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
return self
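# Usage sketch (hypothetical): reduce a time series to its median plus a
# 10-90 percentile spread with
#     msm.nanpercentile((10, 50, 90), axis=0)
# afterwards msm.value holds the 50th percentile and msm.error the pair
# [10th, 90th], as documented above.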
def biweight(self, data_sel=None, axis=0, keepdims=False):
"""
Returns biweight median of the data in the S5Pmsm
Parameters
----------
data_sel : numpy slice
A numpy selection generated, for example, with numpy.s_. Can be used to
skip the first and/or last frame.
axis : int, optional
Axis or axes along which the medians are computed. Default is 0.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
Returns
-------
S5Pmsm object with its data (value & error) replaced by its biweight
medians along one axis. The coordinates are adjusted, accordingly.
"""
if data_sel is None:
if self.error is not None:
self.value = biweight(self.value, axis=axis)
self.error = biweight(self.error, axis=axis)
else:
(self.value, self.error) = biweight(self.value,
axis=axis, spread=True)
else:
if self.error is not None:
self.value = biweight(self.value[data_sel], axis=axis)
self.error = biweight(self.error[data_sel], axis=axis)
else:
(self.value, self.error) = biweight(self.value[data_sel],
axis=axis, spread=True)
if keepdims:
self.value = np.expand_dims(self.value, axis=axis)
self.error = np.expand_dims(self.error, axis=axis)
# adjust the coordinates
if keepdims:
key = self.coords._fields[axis]
if self.coords[axis][0] == 0:
dims = [0]
else:
dims = np.median(self.coords[axis], keepdims=keepdims)
self.coords = self.coords._replace(**{key: dims})
else:
keys = []
dims = []
for ii in range(self.value.ndim+1):
if ii != axis:
keys.append(self.coords._fields[ii])
dims.append(self.coords[ii][:])
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
return self
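# Usage sketch (hypothetical): collapse the time axis to its biweight
# medians while keeping that axis for broadcasting:
#     msm.biweight(axis=0, keepdims=True)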
def nanmedian(self, data_sel=None, axis=0, keepdims=False):
"""
Returns S5Pmsm object containing median & standard deviation of the
original data
Parameters
----------
data_sel : numpy slice, optional
A numpy selection generated, for example, with numpy.s_. Can be used to
skip the first and/or last frame.
axis : int, optional
Axis or axes along which the medians are computed. Default is 0.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
Returns
-------
S5Pmsm object with its data (value & error) replaced by its nanmedian
and standard deviation along one axis.
The coordinates are adjusted, accordingly.
"""
if data_sel is None:
if self.error is not None:
self.error = np.nanmedian(self.error,
axis=axis, keepdims=keepdims)
else:
self.error = np.nanstd(self.value, ddof=1,
axis=axis, keepdims=keepdims)
self.value = np.nanmedian(self.value, axis=axis, keepdims=keepdims)
else:
if self.error is not None:
self.error = np.nanmedian(self.error[data_sel],
axis=axis, keepdims=keepdims)
else:
self.error = np.nanstd(self.value[data_sel], ddof=1,
axis=axis, keepdims=keepdims)
self.value = np.nanmedian(self.value[data_sel],
axis=axis, keepdims=keepdims)
# adjust the coordinates
if keepdims:
key = self.coords._fields[axis]
if self.coords[axis][0] == 0:
dims = [0]
else:
dims = np.median(self.coords[axis], keepdims=keepdims)
self.coords = self.coords._replace(**{key: dims})
else:
keys = []
dims = []
for ii in range(self.value.ndim+1):
if ii != axis:
keys.append(self.coords._fields[ii])
dims.append(self.coords[ii][:])
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
return self
def nanmean(self, data_sel=None, axis=0, keepdims=False):
"""
Returns S5Pmsm object containing mean & standard deviation of the
original data
Parameters
----------
data_sel : numpy slice, optional
A numpy selection generated, for example, with numpy.s_. Can be used to
skip the first and/or last frame.
axis : int, optional
Axis or axes along which the means are computed. Default is 0.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
Returns
-------
S5Pmsm object with its data (value & error) replaced by its nanmean and
standard deviation along one axis.
The coordinates are adjusted, accordingly.
"""
if data_sel is None:
if self.error is not None:
self.error = np.nanmean(self.error,
axis=axis, keepdims=keepdims)
else:
self.error = np.nanstd(self.value, ddof=1,
axis=axis, keepdims=keepdims)
self.value = np.nanmean(self.value, axis=axis, keepdims=keepdims)
else:
if self.error is not None:
self.error = np.nanmean(self.error[data_sel],
axis=axis, keepdims=keepdims)
else:
self.error = np.nanstd(self.value[data_sel], ddof=1,
axis=axis, keepdims=keepdims)
self.value = np.nanmean(self.value[data_sel],
axis=axis, keepdims=keepdims)
# adjust the coordinates
if keepdims:
key = self.coords._fields[axis]
if self.coords[axis][0] == 0:
dims = [0]
else:
dims = np.mean(self.coords[axis], keepdims=keepdims)
self.coords = self.coords._replace(**{key: dims})
else:
keys = []
dims = []
for ii in range(self.value.ndim+1):
if ii != axis:
keys.append(self.coords._fields[ii])
dims.append(self.coords[ii][:])
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
return self
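# Usage sketch (hypothetical): both reductions follow the same pattern when
# the dataset carries no error array of its own:
#     msm.nanmedian(axis=0)   # value -> nanmedian, error -> nanstd (ddof=1)
#     msm.nanmean(axis=0)     # value -> nanmean,   error -> nanstd (ddof=1)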
def transpose(self):
"""
Transpose data and coordinates of an S5Pmsm object
"""
if self.value.ndim <= 1:
return self
if self.error is not None:
self.error = np.transpose(self.error)
self.value = np.transpose(self.value)
keys = []
dims = []
for ii in range(self.value.ndim):
keys.append(self.coords._fields[ii])
dims.append(self.coords[ii][:])
keys[0], keys[1] = keys[1], keys[0]
dims[0], dims[1] = dims[1], dims[0]
coords_namedtuple = namedtuple('Coords', keys)
self.coords = coords_namedtuple._make(dims)
return self
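# Usage sketch (hypothetical): swap row/column orientation before plotting:
#     msm.transpose()
# value, error and the first two coordinates are swapped in place.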
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, model_helper, brew
import caffe2.python.hypothesis_test_util as htu
import hypothesis.strategies as st
from hypothesis import given
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.net = core.Net("test-net")
self.testblob_ref = self.net.ConstantFill(
[], "testblob", shape=[1, 2, 3, 4], value=1.0)
workspace.ResetWorkspace()
def testRootFolder(self):
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.RootFolder(), ".")
self.assertEqual(
workspace.ResetWorkspace("/tmp/caffe-workspace-test"), True)
self.assertEqual(workspace.RootFolder(), "/tmp/caffe-workspace-test")
def testWorkspaceHasBlobWithNonexistingName(self):
self.assertEqual(workspace.HasBlob("non-existing"), False)
def testRunOperatorOnce(self):
self.assertEqual(
workspace.RunOperatorOnce(
self.net.Proto().op[0].SerializeToString()
), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
blobs = workspace.Blobs()
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0], "testblob")
def testGetOperatorCost(self):
op = core.CreateOperator(
"Conv2D",
["X", "W"], ["Y"],
stride_h=1,
stride_w=1,
pad_t=1,
pad_l=1,
pad_b=1,
pad_r=1,
kernel=3,
)
X = np.zeros((1, 8, 8, 8))
W = np.zeros((1, 1, 3, 3))
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
flops, _ = workspace.GetOperatorCost(op.SerializeToString(), ["X", "W"])
self.assertEqual(flops, 1152)
def testRunNetOnce(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testCurrentWorkspaceWrapper(self):
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
self.assertEqual(
workspace.RunPlan(plan.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testConstructPlanFromSteps(self):
step = core.ExecutionStep("test-step-as-plan", self.net)
self.assertEqual(workspace.RunPlan(step), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testResetWorkspace(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.HasBlob("testblob"), False)
def testTensorAccess(self):
ws = workspace.C.Workspace()
""" test in-place modification """
ws.create_blob("tensor").feed(np.array([1.1, 1.2, 1.3]))
tensor = ws.blobs["tensor"].tensor()
tensor.data[0] = 3.3
val = np.array([3.3, 1.2, 1.3])
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" test in-place initialization """
tensor.init([2, 3], core.DataType.INT32)
tensor.data[1, 1] = 100
val = np.zeros([2, 3], dtype=np.int32)
val[1, 1] = 100
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" strings cannot be initialized from python """
with self.assertRaises(RuntimeError):
tensor.init([3, 4], core.DataType.STRING)
""" feed (copy) data into tensor """
val = np.array([[b'abc', b'def'], [b'ghi', b'jkl']], dtype=np.object)
tensor.feed(val)
self.assertEquals(tensor.data[0, 0], b'abc')
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
val = np.array([1.1, 10.2])
tensor.feed(val)
val[0] = 5.2
self.assertEquals(tensor.data[0], 1.1)
""" fetch (copy) data from tensor """
val = np.array([1.1, 1.2])
tensor.feed(val)
val2 = tensor.fetch()
tensor.data[0] = 5.2
val3 = tensor.fetch()
np.testing.assert_array_equal(val, val2)
self.assertEquals(val3[0], 5.2)
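# The asserts above rely on feed() and fetch() copying data: mutating the
# source array after feed(), or the array returned by fetch(), must not
# change the blob stored in the workspace.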
def testFetchFeedBlob(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobViaBlobReference(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob(self.testblob_ref)
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob(self.testblob_ref, fetched), True)
fetched_again = workspace.FetchBlob("testblob") # fetch by name now
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobTypes(self):
for dtype in [np.float16, np.float32, np.float64, np.bool,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16]:
try:
rng = np.iinfo(dtype).max * 2
except ValueError:
rng = 1000
data = ((np.random.rand(2, 3, 4) - 0.5) * rng).astype(dtype)
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, dtype)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobBool(self):
"""Special case for bool to ensure coverage of both true and false."""
data = np.zeros((2, 3, 4)).astype(np.bool)
data.flat[::2] = True
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, np.bool)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobZeroDim(self):
data = np.empty(shape=(2, 0, 3), dtype=np.float32)
self.assertEqual(workspace.FeedBlob("testblob_empty", data), True)
fetched_back = workspace.FetchBlob("testblob_empty")
self.assertEqual(fetched_back.shape, (2, 0, 3))
self.assertEqual(fetched_back.dtype, np.float32)
def testFetchFeedLongStringTensor(self):
# long strings trigger array of object creation
strs = np.array([
b' '.join(10 * [b'long string']),
b' '.join(128 * [b'very long string']),
b'small \0\1\2 string',
b"Hello, world! I have special \0 symbols \1!"])
workspace.FeedBlob('my_str_tensor', strs)
strs2 = workspace.FetchBlob('my_str_tensor')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedShortStringTensor(self):
# small strings trigger NPY_STRING array
strs = np.array([b'elem1', b'elem 2', b'element 3'])
workspace.FeedBlob('my_str_tensor_2', strs)
strs2 = workspace.FetchBlob('my_str_tensor_2')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedPlainString(self):
# this is actual string, not a tensor of strings
s = b"Hello, world! I have special \0 symbols \1!"
workspace.FeedBlob('my_plain_string', s)
s2 = workspace.FetchBlob('my_plain_string')
self.assertEqual(s, s2)
def testFetchBlobs(self):
s1 = b"test1"
s2 = b"test2"
workspace.FeedBlob('s1', s1)
workspace.FeedBlob('s2', s2)
fetch1, fetch2 = workspace.FetchBlobs(['s1', 's2'])
self.assertEquals(s1, fetch1)
self.assertEquals(s2, fetch2)
def testFetchFeedViaBlobDict(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.blobs["testblob"]
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
workspace.blobs["testblob"] = fetched
fetched_again = workspace.blobs["testblob"]
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
self.assertTrue("testblob" in workspace.blobs)
self.assertFalse("non_existant" in workspace.blobs)
self.assertEqual(len(workspace.blobs), 1)
for key in workspace.blobs:
self.assertEqual(key, "testblob")
class TestMultiWorkspaces(unittest.TestCase):
def setUp(self):
workspace.SwitchWorkspace("default")
workspace.ResetWorkspace()
def testCreateWorkspace(self):
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.SwitchWorkspace("test", True), None)
self.assertEqual(workspace.HasBlob("testblob"), False)
self.assertEqual(workspace.SwitchWorkspace("default"), None)
self.assertEqual(workspace.HasBlob("testblob"), True)
try:
# The following should raise an error.
workspace.SwitchWorkspace("non-existing")
# so this should never happen.
self.assertEqual(True, False)
except RuntimeError:
pass
workspaces = workspace.Workspaces()
self.assertTrue("default" in workspaces)
self.assertTrue("test" in workspaces)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestWorkspaceGPU(test_util.TestCase):
def setUp(self):
workspace.ResetWorkspace()
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.net.RunAllOnGPU()
def testFetchBlobGPU(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testGetCudaPeerAccessPattern(self):
pattern = workspace.GetCudaPeerAccessPattern()
self.assertEqual(type(pattern), np.ndarray)
self.assertEqual(pattern.ndim, 2)
self.assertEqual(pattern.shape[0], pattern.shape[1])
self.assertEqual(pattern.shape[0], workspace.NumCudaDevices())
@unittest.skipIf(not workspace.C.has_mkldnn, "No MKLDNN support.")
class TestWorkspaceMKLDNN(test_util.TestCase):
def testFeedFetchBlobMKLDNN(self):
arr = np.random.randn(2, 3).astype(np.float32)
workspace.FeedBlob(
"testblob_mkldnn", arr, core.DeviceOption(caffe2_pb2.MKLDNN))
fetched = workspace.FetchBlob("testblob_mkldnn")
np.testing.assert_array_equal(arr, fetched)
class TestImmedibate(test_util.TestCase):
def testImmediateEnterExit(self):
workspace.StartImmediate(i_know=True)
self.assertTrue(workspace.IsImmediate())
workspace.StopImmediate()
self.assertFalse(workspace.IsImmediate())
def testImmediateRunsCorrectly(self):
workspace.StartImmediate(i_know=True)
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.ImmediateBlobs(), ["testblob"])
content = workspace.FetchImmediate("testblob")
# Also, the immediate mode should not invade the original namespace,
# so we check if this is so.
with self.assertRaises(RuntimeError):
workspace.FetchBlob("testblob")
np.testing.assert_array_equal(content, 1.0)
content[:] = 2.0
self.assertTrue(workspace.FeedImmediate("testblob", content))
np.testing.assert_array_equal(
workspace.FetchImmediate("testblob"), 2.0)
workspace.StopImmediate()
with self.assertRaises(RuntimeError):
content = workspace.FetchImmediate("testblob")
def testImmediateRootFolder(self):
workspace.StartImmediate(i_know=True)
# for testing we will look into the _immediate_root_folder variable
# but in normal usage you should not access that.
self.assertTrue(len(workspace._immediate_root_folder) > 0)
root_folder = workspace._immediate_root_folder
self.assertTrue(os.path.isdir(root_folder))
workspace.StopImmediate()
self.assertTrue(len(workspace._immediate_root_folder) == 0)
# After termination, immediate mode should have the root folder
# deleted.
self.assertFalse(os.path.exists(root_folder))
class TestCppEnforceAsException(test_util.TestCase):
def testEnforce(self):
op = core.CreateOperator("Relu", ["X"], ["Y"])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
class TestCWorkspace(htu.HypothesisTestCase):
def test_net_execution(self):
ws = workspace.C.Workspace()
self.assertEqual(ws.nets, {})
self.assertEqual(ws.blobs, {})
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
ws.create_net(net)
# If we do not specify overwrite, this should raise an error.
with self.assertRaises(RuntimeError):
ws.create_net(net)
# But, if we specify overwrite, this should pass.
ws.create_net(net, True)
# Overwrite can also be a kwarg.
ws.create_net(net, overwrite=True)
self.assertIn("testblob", ws.blobs)
self.assertEqual(len(ws.nets), 1)
net_name = net.Proto().name
self.assertIn("test-net", net_name)
net = ws.nets[net_name].run()
blob = ws.blobs["testblob"]
np.testing.assert_array_equal(
np.ones((1, 2, 3, 4), dtype=np.float32),
blob.fetch())
@given(name=st.text(), value=st.floats(min_value=-1, max_value=1.0))
def test_operator_run(self, name, value):
ws = workspace.C.Workspace()
op = core.CreateOperator(
"ConstantFill", [], [name], shape=[1], value=value)
ws.run(op)
self.assertIn(name, ws.blobs)
np.testing.assert_allclose(
[value], ws.blobs[name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_run(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.run(net)
self.assertIn(blob_name, ws.blobs)
self.assertNotIn(net_name, ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
plan_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_plan_run(self, blob_name, plan_name, net_name, value):
ws = workspace.C.Workspace()
plan = core.Plan(plan_name)
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
plan.AddStep(core.ExecutionStep("step", nets=[net], num_iter=1))
ws.run(plan)
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_create(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.create_net(net).run()
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(name=st.text(),
value=htu.tensor(),
device_option=st.sampled_from(htu.device_options))
def test_array_serde(self, name, value, device_option):
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value, device_option=device_option)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
np.testing.assert_equal(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
np.testing.assert_equal(value, serde_blob.fetch())
@given(name=st.text(), value=st.text())
def test_string_serde(self, name, value):
value = value.encode('ascii', 'ignore')
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
self.assertEqual(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
self.assertEqual(value, serde_blob.fetch())
def test_exception(self):
ws = workspace.C.Workspace()
with self.assertRaises(TypeError):
ws.create_net("...")
class TestPredictor(unittest.TestCase):
def _create_model(self):
m = model_helper.ModelHelper()
y = brew.fc(m, "data", "y",
dim_in=4, dim_out=2,
weight_init=('ConstantFill', dict(value=1.0)),
bias_init=('ConstantFill', dict(value=0.0)),
axis=0)
m.net.AddExternalOutput(y)
return m
# Use this test with a bigger model to see how using Predictor allows one
# to avoid issues with the low protobuf size limit in Python.
#
# def test_predictor_predefined(self):
# workspace.ResetWorkspace()
# path = 'caffe2/caffe2/test/assets/'
# with open(path + 'squeeze_predict_net.pb') as f:
# self.predict_net = f.read()
# with open(path + 'squeeze_init_net.pb') as f:
# self.init_net = f.read()
# self.predictor = workspace.Predictor(self.init_net, self.predict_net)
# inputs = [np.zeros((1, 3, 256, 256), dtype='f')]
# outputs = self.predictor.run(inputs)
# self.assertEqual(len(outputs), 1)
# self.assertEqual(outputs[0].shape, (1, 1000, 1, 1))
# self.assertAlmostEqual(outputs[0][0][0][0][0], 5.19026289e-05)
def test_predictor_memory_model(self):
workspace.ResetWorkspace()
m = self._create_model()
workspace.FeedBlob("data", np.zeros([4], dtype='float32'))
self.predictor = workspace.Predictor(
workspace.StringifyProto(m.param_init_net.Proto()),
workspace.StringifyProto(m.net.Proto()))
inputs = np.array([1, 3, 256, 256], dtype='float32')
outputs = self.predictor.run([inputs])
np.testing.assert_array_almost_equal(
np.array([[516, 516]], dtype='float32'), outputs)
class TestTransform(htu.HypothesisTestCase):
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10))
def test_simple_transform(self, input_dim, output_dim, batch_size):
m = model_helper.ModelHelper()
fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
conv = brew.conv(m, fc2, "conv",
dim_in=output_dim,
dim_out=output_dim,
use_cudnn=True,
engine="CUDNN",
kernel=3)
conv.Relu([], conv)\
.Softmax([], "pred") \
.LabelCrossEntropy(["label"], ["xent"]) \
.AveragedLoss([], "loss")
transformed_net_proto = workspace.ApplyTransform(
"ConvToNNPack",
m.net.Proto())
self.assertEqual(transformed_net_proto.op[2].engine, "NNPACK")
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10))
def test_registry_invalid(self, input_dim, output_dim, batch_size):
m = model_helper.ModelHelper()
brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
with self.assertRaises(RuntimeError):
workspace.ApplyTransform(
"definitely_not_a_real_transform",
m.net.Proto())
@given(value=st.floats(min_value=-1, max_value=1))
def test_apply_transform_if_faster(self, value):
init_net = core.Net("init_net")
init_net.ConstantFill([], ["data"], shape=[5, 5, 5, 5], value=value)
init_net.ConstantFill([], ["conv_w"], shape=[5, 5, 3, 3], value=value)
init_net.ConstantFill([], ["conv_b"], shape=[5], value=value)
self.assertEqual(
workspace.RunNetOnce(init_net.Proto().SerializeToString()), True)
m = model_helper.ModelHelper()
conv = brew.conv(m, "data", "conv",
dim_in=5,
dim_out=5,
kernel=3,
use_cudnn=True,
engine="CUDNN")
conv.Relu([], conv)\
.Softmax([], "pred") \
.AveragedLoss([], "loss")
self.assertEqual(
workspace.RunNetOnce(m.net.Proto().SerializeToString()), True)
proto = workspace.ApplyTransformIfFaster(
"ConvToNNPack",
m.net.Proto(),
init_net.Proto())
self.assertEqual(
workspace.RunNetOnce(proto.SerializeToString()), True)
proto = workspace.ApplyTransformIfFaster(
"ConvToNNPack",
m.net.Proto(),
init_net.Proto(),
warmup_runs=10,
main_runs=100,
improvement_threshold=2.0)
self.assertEqual(
workspace.RunNetOnce(proto.SerializeToString()), True)
if __name__ == '__main__':
unittest.main()
|
|
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys, os
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occured.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
class TestGenerateFunctions(unittest.TestCase):
def test_build_auth_header(self):
header = oauth.build_authenticate_header()
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
self.assertEqual(len(header), 1)
realm = 'http://example.myrealm.com/'
header = oauth.build_authenticate_header(realm)
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
realm)
self.assertEqual(len(header), 1)
def test_escape(self):
string = 'http://whatever.com/~someuser/?test=test&other=other'
self.assert_('~' in oauth.escape(string))
string = '../../../../../../../etc/passwd'
self.assert_('../' not in oauth.escape(string))
def test_gen_nonce(self):
nonce = oauth.generate_nonce()
self.assertEqual(len(nonce), 8)
nonce = oauth.generate_nonce(20)
self.assertEqual(len(nonce), 20)
def test_gen_verifier(self):
verifier = oauth.generate_verifier()
self.assertEqual(len(verifier), 8)
verifier = oauth.generate_verifier(16)
self.assertEqual(len(verifier), 16)
def test_gen_timestamp(self):
exp = int(time.time())
now = oauth.generate_timestamp()
self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.consumer = oauth.Consumer(key=self.key, secret=self.secret)
def test_init(self):
self.assertEqual(self.consumer.key, self.key)
self.assertEqual(self.consumer.secret, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))
def test_str(self):
res = dict(parse_qsl(str(self.consumer)))
self.assertTrue('oauth_consumer_key' in res)
self.assertTrue('oauth_consumer_secret' in res)
self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.token = oauth.Token(self.key, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Token(None, None))
self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))
def test_init(self):
self.assertEqual(self.token.key, self.key)
self.assertEqual(self.token.secret, self.secret)
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
self.assertEqual(self.token.verifier, None)
def test_set_callback(self):
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
cb = 'http://www.example.com/my-callback'
self.token.set_callback(cb)
self.assertEqual(self.token.callback, cb)
self.assertEqual(self.token.callback_confirmed, 'true')
self.token.set_callback(None)
self.assertEqual(self.token.callback, None)
# TODO: The following test should probably not pass, but it does
# To fix this, check for None and unset 'true' in set_callback
# Additionally, should a confirmation truly be done of the callback?
self.assertEqual(self.token.callback_confirmed, 'true')
def test_set_verifier(self):
self.assertEqual(self.token.verifier, None)
v = oauth.generate_verifier()
self.token.set_verifier(v)
self.assertEqual(self.token.verifier, v)
self.token.set_verifier()
self.assertNotEqual(self.token.verifier, v)
self.token.set_verifier('')
self.assertEqual(self.token.verifier, '')
def test_get_callback_url(self):
self.assertEqual(self.token.get_callback_url(), None)
self.token.set_verifier()
self.assertEqual(self.token.get_callback_url(), None)
cb = 'http://www.example.com/my-callback?save=1&return=true'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '&oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
cb = 'http://www.example.com/my-callback-no-query'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '?oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
def test_to_string(self):
string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
self.key)
self.assertEqual(self.token.to_string(), string)
self.token.set_callback('http://www.example.com/my-callback')
string += '&oauth_callback_confirmed=true'
self.assertEqual(self.token.to_string(), string)
def _compare_tokens(self, new):
self.assertEqual(self.token.key, new.key)
self.assertEqual(self.token.secret, new.secret)
# TODO: What about copying the callback to the new token?
# self.assertEqual(self.token.callback, new.callback)
self.assertEqual(self.token.callback_confirmed,
new.callback_confirmed)
# TODO: What about copying the verifier to the new token?
# self.assertEqual(self.token.verifier, new.verifier)
def test_to_string(self):
tok = oauth.Token('tooken', 'seecret')
self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
def test_from_string(self):
self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
self.token.set_callback('http://www.example.com/my-callback')
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
class TestRequest(unittest.TestCase):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
try:
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.url, exp1)
req = oauth.Request(method, url2)
self.assertEquals(req.url, exp2)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
'foo': 'baz',
'bar': 'foo'
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertEqual(len(vars), len(params) + 1)
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertEqual(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
self.assertEquals(params, dict(parse_qsl(req.to_postdata())))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertEquals(urllib.urlencode(sorted(params.items())), res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
def test_sign_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200"
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params['oauth_token'] = tok.key
params['oauth_consumer_key'] = con.key
req = oauth.Request(method="GET", url=url, parameters=params)
methods = {
'TQ6vGQ5A6IZn8dmeGB4+/Jl3EMI=': oauth.SignatureMethod_HMAC_SHA1(),
'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
}
for exp, method in methods.items():
req.sign_request(method, con, tok)
self.assertEquals(req['oauth_signature_method'], method.name)
self.assertEquals(req['oauth_signature'], exp)
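# For reference: in OAuth 1.0 the PLAINTEXT signature is simply
# "<consumer_secret>&<token_secret>" (hence 'con-test-secret&tok-test-secret'
# above), while HMAC-SHA1 signs the base string
# "<METHOD>&<encoded url>&<encoded normalized parameters>" with that same
# key and base64-encodes the digest.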
def test_from_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
headers = req.to_header()
# Test from the headers
req = oauth.Request.from_request("GET", url, headers)
self.assertEquals(req.method, "GET")
self.assertEquals(req.url, url)
self.assertEquals(params, req.copy())
# Test with bad OAuth headers
bad_headers = {
'Authorization' : 'OAuth this is a bad header'
}
self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
url, bad_headers)
# Test getting from query string
qs = urllib.urlencode(params)
req = oauth.Request.from_request("GET", url, query_string=qs)
exp = parse_qs(qs, keep_blank_values=False)
for k, v in exp.iteritems():
exp[k] = urllib.unquote(v[0])
self.assertEquals(exp, req.copy())
# Test that a boned from_request() call returns None
req = oauth.Request.from_request("GET", url)
self.assertEquals(None, req)
def test_from_token_and_callback(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
req = oauth.Request.from_token_and_callback(tok)
self.assertFalse('oauth_callback' in req)
self.assertEquals(req['oauth_token'], tok.key)
req = oauth.Request.from_token_and_callback(tok, callback=url)
self.assertTrue('oauth_callback' in req)
self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
url = "http://sp.example.com/"
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
req = oauth.Request.from_consumer_and_token(con, token=tok,
http_method="GET", http_url=url)
self.assertEquals(req['oauth_token'], tok.key)
self.assertEquals(req['oauth_consumer_key'], con.key)
class SignatureMethod_Bad(oauth.SignatureMethod):
name = "BAD"
def signing_base(self, request, consumer, token):
return ""
def sign(self, request, consumer, token):
return "invalid-signature"
class TestServer(unittest.TestCase):
def setUp(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
def test_init(self):
server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
self.assertTrue('HMAC-SHA1' in server.signature_methods)
self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
server = oauth.Server()
self.assertEquals(server.signature_methods, {})
def test_add_signature_method(self):
server = oauth.Server()
res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertTrue(len(res) == 1)
self.assertTrue('HMAC-SHA1' in res)
self.assertTrue(isinstance(res['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
self.assertTrue(len(res) == 2)
self.assertTrue('PLAINTEXT' in res)
self.assertTrue(isinstance(res['PLAINTEXT'],
oauth.SignatureMethod_PLAINTEXT))
def test_verify_request(self):
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
self.assertTrue('bar' in parameters)
self.assertTrue('foo' in parameters)
self.assertEquals(parameters['bar'], 'blerg')
self.assertEquals(parameters['foo'], 59)
def test_no_version(self):
url = "http://sp.example.com/"
params = {
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
def test_invalid_version(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '222.9922',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_invalid_signature_method(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = SignatureMethod_Bad()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_missing_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
del request['oauth_signature']
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.MissingSignature, server.verify_request,
request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
# oauth_uris = {
# 'request_token': '/request_token.php',
# 'access_token': '/access_token.php'
# }
oauth_uris = {
'request_token': '/request_token',
'authorize': '/authorize',
'access_token': '/access_token',
'two_legged': '/two_legged',
'three_legged': '/three_legged'
}
consumer_key = 'bd37aed57e15df53'
consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
host = 'http://oauth-sandbox.sevengoslings.net'
def setUp(self):
self.consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
self.body = {
'foo': 'bar',
'bar': 'foo',
'blah': 599999
}
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
def test_access_token_get(self):
"""Test getting an access token via GET."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "GET")
self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
if __name__ == "__main__":
unittest.main()
|
|
"""Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import numpy as np
from .triangulate import Triangulation
class TestData(dict):
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self.__dict__ = self
class TestDataSet(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
data = TestData(
franke100=TestDataSet(
x=np.array([0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454, 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899, 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001, 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359, 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064, 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757, 0.8596328, 0.9279871, 0.8512805,
1.044982, 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704, 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289,
0.050133, 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243, 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788, 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679, 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231, 0.969603,
-0.01209, 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([0.1375, 0.9125, 0.7125, 0.225, -0.05, 0.475, 0.05,
0.45, 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85,
0.7, 0.275, 0.45, 0.8125, 0.45, 1., 0.5,
0.1875, 0.5875, 1.05, 0.1]),
y=np.array([0.975, 0.9875, 0.7625, 0.8375, 0.4125, 0.6375,
-0.05, 1.0375, 0.55, 0.8, 0.75, 0.575,
0.55, 0.4375, 0.3125, 0.425, 0.2875, 0.1875,
-0.0375, 0.2625, 0.4625, 0.2625, 0.125, -0.06125,
0.1125])),
random100=TestDataSet(
x=np.array([0.0096326, 0.0216348, 0.029836, 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966, 0.252774, 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026, 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203, 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726, 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873,
0.9425637, 0.4799701, 0.1783069, 0.114676, 0.8225797,
0.2270688, 0.4073598, 0.887508, 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812, 0.6409007, 0.105869,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675])),
uniform9=TestDataSet(
x=np.array([1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x * 9
y = y * 9
x1 = x + 1.0
x2 = x - 2.0
x4 = x - 4.0
x7 = x - 7.0
y1 = y + 1.0  # (9*y + 1) term of Franke's test function
y2 = y - 2.0
y3 = y - 3.0
y7 = y - 7.0
f = (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) +
0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) +
0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) -
0.2 * np.exp(-x4 * x4 - y7 * y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0 * (y - x) + 1.0) / 9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4 * y)) / (6.0 + 6.0 * (3 * x - 1.0) ** 2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle, 0, 100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y) + np.sin(10.0 * x * y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0 - 10.0 * x
y = 5.0 - 10.0 * y
g1 = np.exp(-x * x / 2)
g2 = np.exp(-y * y / 2)
f = g1 + 0.75 * g2 * (1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0 - 20.0 * x) / 3.0)
ey = np.exp((10.0 - 20.0 * y) / 3.0)
logitx = 1.0 / (1.0 + ex)
logity = 1.0 / (1.0 + ey)
f = (((20.0 / 3.0) ** 3 * ex * ey) ** 2 * (logitx * logity) ** 5 *
(ex - 2.0 * logitx) * (ey - 2.0 * logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80 * x - 40.0, 90 * y - 45.)
f = np.exp(-0.04 * circle) * np.cos(0.15 * circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss,
cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0),
nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange + self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
import matplotlib as mpl
from matplotlib import pylab as pl
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
else:
y, x = np.mgrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
pl.ioff()
pl.clf()
pl.hot() # Some like it hot
if plotter == 'imshow':
pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent,
origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[
self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
pl.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(
np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]),
colors=[(0, 0, 0, 0.2)])
ax = pl.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
pl.title('%s: %s' % (func.title, title))
else:
pl.title(title)
pl.show()
pl.ion()
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange + self.yrange)
def plotallfuncs(allfuncs=allfuncs):
from matplotlib import pylab as pl
pl.ioff()
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
print(func.title)
nnt.plot(func, interp=False, plotter='imshow')
pl.savefig('%s-ref-img.png' % func.__name__)
nnt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-nn-img.png' % func.__name__)
lpt.plot(func, interp=True, plotter='imshow')
pl.savefig('%s-lin-img.png' % func.__name__)
nnt.plot(func, interp=False, plotter='contour')
pl.savefig('%s-ref-con.png' % func.__name__)
nnt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-nn-con.png' % func.__name__)
lpt.plot(func, interp=True, plotter='contour')
pl.savefig('%s-lin-con.png' % func.__name__)
pl.ion()
def plot_dt(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 0, 0, 0.2)]
lc = mpl.collections.LineCollection(
np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
for i, j in tri.edge_db]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_vo(tri, colors=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if colors is None:
colors = [(0, 1, 0, 0.2)]
lc = mpl.collections.LineCollection(np.array(
[(tri.circumcenters[i], tri.circumcenters[j])
for i in xrange(len(tri.circumcenters))
for j in tri.triangle_neighbors[i] if j != -1]),
colors=colors)
ax = pl.gca()
ax.add_collection(lc)
pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
import matplotlib as mpl
from matplotlib import pylab as pl
if edgecolor is None:
edgecolor = (0, 0, 1, 0.2)
dxy = (np.array([(tri.x[i], tri.y[i]) for i, j, k in tri.triangle_nodes])
- tri.circumcenters)
r = np.hypot(dxy[:, 0], dxy[:, 1])
ax = pl.gca()
for i in xrange(len(r)):
p = mpl.patches.Circle(tri.circumcenters[i], r[i],
resolution=100, edgecolor=edgecolor,
facecolor=(1, 1, 1, 0), linewidth=0.2)
ax.add_patch(p)
pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
"""Compute a quality factor (the quantity r**2 from TOMS792).
interpolator must be in ('linear', 'nn').
"""
fz = func(mesh.x, mesh.y)
tri = Triangulation(mesh.x, mesh.y)
intp = getattr(tri,
interpolator + '_extrapolator')(fz, bbox=(0., 1., 0., 1.))
Y, X = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)]
Z = func(X, Y)
iz = intp[0:1:complex(0, n), 0:1:complex(0, n)]
#nans = np.isnan(iz)
#numgood = n*n - np.sum(np.array(nans.flat, np.int32))
numgood = n * n
SE = (Z - iz) ** 2
SSE = np.sum(SE.flat)
meanZ = np.sum(Z.flat) / numgood
SM = (Z - meanZ) ** 2
SSM = np.sum(SM.flat)
r2 = 1.0 - SSE / SSM
print(func.__name__, r2, SSE, SSM, numgood)
return r2
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
results = {}
kv = list(six.iteritems(data))
kv.sort()
for name, mesh in kv:
reslist = results.setdefault(name, [])
for func in allfuncs:
reslist.append(quality(func, mesh, interpolator, n))
return results
def funky():
x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
tx = 0.46
ty = 0.23
t0 = Triangulation(x0, y0)
t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty])))
return t0, t1
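# Illustrative usage sketch (not part of the original module; run it with
# package context, e.g. "python -m ...testfuncs", because of the relative
# Triangulation import above). It scores the natural-neighbor interpolator
# against each test function on the bundled franke100 scatter set:
#
#     for f in allfuncs:
#         quality(f, data.franke100, interpolator='nn', n=33)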
|
|
#!/usr/bin/env python
import sys
if len(sys.argv) < 2:
print "Provide the integer size in bytes"
sys.exit(1)
size = int(sys.argv[1])
full_rows = size // 10
init_size = size % 10
if init_size == 0:
full_rows = full_rows - 1
init_size = 10
def rx(i):
return i + 2
def ry(i):
return i + 12
def emit(line, *args):
s = '"' + line + r' \n\t"'
print s % args
#### set up registers
emit("adiw r30, %s", size - init_size) # move z
emit("adiw r28, %s", size - init_size) # move y
for i in xrange(init_size):
emit("ld r%s, x+", rx(i))
for i in xrange(init_size):
emit("ld r%s, y+", ry(i))
emit("ldi r25, 0")
print ""
if init_size == 1:
emit("mul r2, r12")
emit("st z+, r0")
emit("st z+, r1")
else:
#### first two multiplications of initial block
emit("ldi r23, 0")
emit("mul r2, r12")
emit("st z+, r0")
emit("mov r22, r1")
print ""
emit("ldi r24, 0")
emit("mul r2, r13")
emit("add r22, r0")
emit("adc r23, r1")
emit("mul r3, r12")
emit("add r22, r0")
emit("adc r23, r1")
emit("adc r24, r25")
emit("st z+, r22")
print ""
#### rest of initial block, with moving accumulator registers
acc = [23, 24, 22]
for r in xrange(2, init_size):
emit("ldi r%s, 0", acc[2])
for i in xrange(0, r+1):
emit("mul r%s, r%s", rx(i), ry(r - i))
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0])
print ""
acc = acc[1:] + acc[:1]
for r in xrange(1, init_size-1):
emit("ldi r%s, 0", acc[2])
for i in xrange(0, init_size-r):
emit("mul r%s, r%s", rx(r+i), ry((init_size-1) - i))
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0])
print ""
acc = acc[1:] + acc[:1]
emit("mul r%s, r%s", rx(init_size-1), ry(init_size-1))
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("st z+, r%s", acc[0])
emit("st z+, r%s", acc[1])
print ""
#### reset y and z pointers
emit("sbiw r30, %s", 2 * init_size + 10)
emit("sbiw r28, %s", init_size + 10)
#### load y registers
for i in xrange(10):
emit("ld r%s, y+", ry(i))
#### load additional x registers
for i in xrange(init_size, 10):
emit("ld r%s, x+", rx(i))
print ""
prev_size = init_size
for row in xrange(full_rows):
#### do x = 0-9, y = 0-9 multiplications
emit("ldi r23, 0")
emit("mul r2, r12")
emit("st z+, r0")
emit("mov r22, r1")
print ""
emit("ldi r24, 0")
emit("mul r2, r13")
emit("add r22, r0")
emit("adc r23, r1")
emit("mul r3, r12")
emit("add r22, r0")
emit("adc r23, r1")
emit("adc r24, r25")
emit("st z+, r22")
print ""
acc = [23, 24, 22]
for r in xrange(2, 10):
emit("ldi r%s, 0", acc[2])
for i in xrange(0, r+1):
emit("mul r%s, r%s", rx(i), ry(r - i))
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0])
print ""
acc = acc[1:] + acc[:1]
#### now we need to start shifting x and loading from z
x_regs = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
for r in xrange(0, prev_size):
x_regs = x_regs[1:] + x_regs[:1]
emit("ld r%s, x+", x_regs[9]) # load next byte of left
emit("ldi r%s, 0", acc[2])
for i in xrange(0, 10):
emit("mul r%s, r%s", x_regs[i], ry(9 - i))
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("ld r0, z") # load stored value from initial block, and add to accumulator (note z does not increment)
emit("add r%s, r0", acc[0])
emit("adc r%s, r25", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0]) # store next byte (z increments)
print ""
acc = acc[1:] + acc[:1]
# done shifting x, start shifting y
y_regs = [12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
for r in xrange(0, prev_size):
y_regs = y_regs[1:] + y_regs[:1]
emit("ld r%s, y+", y_regs[9]) # load next byte of right
emit("ldi r%s, 0", acc[2])
for i in xrange(0, 10):
emit("mul r%s, r%s", x_regs[i], y_regs[9 -i])
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("ld r0, z") # load stored value from initial block, and add to accumulator (note z does not increment)
emit("add r%s, r0", acc[0])
emit("adc r%s, r25", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0]) # store next byte (z increments)
print ""
acc = acc[1:] + acc[:1]
# done both shifts, do remaining corner
for r in xrange(1, 9):
emit("ldi r%s, 0", acc[2])
for i in xrange(0, 10-r):
emit("mul r%s, r%s", x_regs[r+i], y_regs[9 - i])
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("adc r%s, r25", acc[2])
emit("st z+, r%s", acc[0])
print ""
acc = acc[1:] + acc[:1]
emit("mul r%s, r%s", x_regs[9], y_regs[9])
emit("add r%s, r0", acc[0])
emit("adc r%s, r1", acc[1])
emit("st z+, r%s", acc[0])
emit("st z+, r%s", acc[1])
print ""
prev_size = prev_size + 10
if row < full_rows - 1:
#### reset x, y and z pointers
emit("sbiw r30, %s", 2 * prev_size + 10)
emit("sbiw r28, %s", prev_size + 10)
emit("sbiw r26, %s", prev_size)
#### load x and y registers
for i in xrange(10):
emit("ld r%s, x+", rx(i))
emit("ld r%s, y+", ry(i))
print ""
emit("eor r1, r1")
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.engine.clients.os import cinder as c_plugin
from heat.engine.resources.openstack.cinder import qos_specs
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
QOS_SPECS_TEMPLATE = {
'heat_template_version': '2015-10-15',
'description': 'Cinder QoS specs creation example',
'resources': {
'my_qos_specs': {
'type': 'OS::Cinder::QoSSpecs',
'properties': {
'name': 'foobar',
'specs': {"foo": "bar", "foo1": "bar1"}
}
}
}
}
QOS_ASSOCIATE_TEMPLATE = {
'heat_template_version': '2015-10-15',
'description': 'Cinder QoS specs association example',
'resources': {
'my_qos_associate': {
'type': 'OS::Cinder::QoSAssociation',
'properties': {
'volume_types': ['ceph', 'lvm'],
'qos_specs': 'foobar'
}
}
}
}
class QoSSpecsTest(common.HeatTestCase):
def setUp(self):
super(QoSSpecsTest, self).setUp()
self.ctx = utils.dummy_context()
self.patchobject(c_plugin.CinderClientPlugin, 'has_extension',
return_value=True)
self.stack = stack.Stack(
self.ctx, 'cinder_qos_spec_test_stack',
template.Template(QOS_SPECS_TEMPLATE)
)
self.my_qos_specs = self.stack['my_qos_specs']
cinder_client = mock.MagicMock()
self.cinderclient = mock.MagicMock()
self.my_qos_specs.client = cinder_client
cinder_client.return_value = self.cinderclient
self.qos_specs = self.cinderclient.qos_specs
self.value = mock.MagicMock()
self.value.id = '927202df-1afb-497f-8368-9c2d2f26e5db'
self.value.name = 'foobar'
self.value.specs = {'foo': 'bar', 'foo1': 'bar1'}
self.qos_specs.create.return_value = self.value
def test_resource_mapping(self):
mapping = qos_specs.resource_mapping()
self.assertEqual(2, len(mapping))
self.assertEqual(qos_specs.QoSSpecs,
mapping['OS::Cinder::QoSSpecs'])
self.assertIsInstance(self.my_qos_specs,
qos_specs.QoSSpecs)
def _set_up_qos_specs_environment(self):
self.qos_specs.create.return_value = self.value
self.my_qos_specs.handle_create()
def test_qos_specs_handle_create_specs(self):
self._set_up_qos_specs_environment()
self.assertEqual(1, self.qos_specs.create.call_count)
self.assertEqual(self.value.id, self.my_qos_specs.resource_id)
def test_qos_specs_handle_update_specs(self):
self._set_up_qos_specs_environment()
resource_id = self.my_qos_specs.resource_id
prop_diff = {'specs': {'foo': 'bar', 'bar': 'bar'}}
set_expected = {'bar': 'bar'}
unset_expected = ['foo1']
self.my_qos_specs.handle_update(
json_snippet=None, tmpl_diff=None, prop_diff=prop_diff
)
self.qos_specs.set_keys.assert_called_once_with(
resource_id,
set_expected
)
self.qos_specs.unset_keys.assert_called_once_with(
resource_id,
unset_expected
)
def test_qos_specs_handle_delete_specs(self):
self._set_up_qos_specs_environment()
resource_id = self.my_qos_specs.resource_id
self.my_qos_specs.handle_delete()
self.qos_specs.disassociate_all.assert_called_once_with(resource_id)
class QoSAssociationTest(common.HeatTestCase):
def setUp(self):
super(QoSAssociationTest, self).setUp()
self.ctx = utils.dummy_context()
self.qos_specs_id = 'foobar'
self.patchobject(c_plugin.CinderClientPlugin, 'has_extension',
return_value=True)
self.patchobject(c_plugin.CinderClientPlugin, 'get_qos_specs',
return_value=self.qos_specs_id)
self.stack = stack.Stack(
self.ctx, 'cinder_qos_associate_test_stack',
template.Template(QOS_ASSOCIATE_TEMPLATE)
)
self.my_qos_associate = self.stack['my_qos_associate']
cinder_client = mock.MagicMock()
self.cinderclient = mock.MagicMock()
self.my_qos_associate.client = cinder_client
cinder_client.return_value = self.cinderclient
self.qos_specs = self.cinderclient.qos_specs
self.stub_QoSSpecsConstraint_validate()
self.stub_VolumeTypeConstraint_validate()
self.vt_ceph = 'ceph'
self.vt_lvm = 'lvm'
self.vt_new_ceph = 'new_ceph'
def test_resource_mapping(self):
mapping = qos_specs.resource_mapping()
self.assertEqual(2, len(mapping))
self.assertEqual(qos_specs.QoSAssociation,
mapping['OS::Cinder::QoSAssociation'])
self.assertIsInstance(self.my_qos_associate,
qos_specs.QoSAssociation)
def _set_up_qos_associate_environment(self):
self.my_qos_associate.handle_create()
def test_qos_associate_handle_create(self):
self.patchobject(c_plugin.CinderClientPlugin, 'get_volume_type',
side_effect=[self.vt_ceph, self.vt_lvm])
self._set_up_qos_associate_environment()
self.cinderclient.qos_specs.associate.assert_any_call(
self.qos_specs_id,
self.vt_ceph
)
self.qos_specs.associate.assert_any_call(
self.qos_specs_id,
self.vt_lvm
)
def test_qos_associate_handle_update(self):
self.patchobject(c_plugin.CinderClientPlugin, 'get_volume_type',
side_effect=[self.vt_lvm, self.vt_ceph,
self.vt_new_ceph,
self.vt_ceph])
self._set_up_qos_associate_environment()
prop_diff = {'volume_types': [self.vt_lvm, self.vt_new_ceph]}
self.my_qos_associate.handle_update(
json_snippet=None, tmpl_diff=None, prop_diff=prop_diff
)
self.qos_specs.associate.assert_any_call(
self.qos_specs_id,
self.vt_new_ceph
)
self.qos_specs.disassociate.assert_any_call(
self.qos_specs_id,
self.vt_ceph
)
def test_qos_associate_handle_delete_specs(self):
self.patchobject(c_plugin.CinderClientPlugin, 'get_volume_type',
side_effect=[self.vt_ceph, self.vt_lvm,
self.vt_ceph, self.vt_lvm])
self._set_up_qos_associate_environment()
self.my_qos_associate.handle_delete()
self.qos_specs.disassociate.assert_any_call(
self.qos_specs_id,
self.vt_ceph
)
self.qos_specs.disassociate.assert_any_call(
self.qos_specs_id,
self.vt_lvm
)
|
|
#!/usr/bin/env python
import os
import struct
import sys
import math
import warnings
from psr_constants import ARCSECTORAD
telescope_ids = {"Fake": 0, "Arecibo": 1, "ARECIBO 305m": 1,
"Ooty": 2, "Nancay": 3, "Parkes": 4, "Jodrell": 5,
"GBT": 6, "GMRT": 7, "Effelsberg": 8, "ATA": 9,
"SRT": 10, "LOFAR": 11, "VLA": 12, "CHIME": 20,
"FAST": 21, "MeerKAT": 64, "KAT-7": 65}
ids_to_telescope = dict(zip(telescope_ids.values(), telescope_ids.keys()))
machine_ids = {"FAKE": 0, "PSPM": 1, "Wapp": 2, "WAPP": 2, "AOFTM": 3,
"BCPM1": 4, "BPP": 4, "OOTY": 5, "SCAMP": 6,
"GBT Pulsar Spigot": 7, "SPIGOT": 7, "BG/P": 11,
"PDEV": 12, "CHIME+PSR": 20, "KAT": 64, "KAT-DC2": 65}
ids_to_machine = dict(zip(machine_ids.values(), machine_ids.keys()))
header_params = {
"HEADER_START": 'flag',
"telescope_id": 'i',
"machine_id": 'i',
"data_type": 'i',
"rawdatafile": 'str',
"source_name": 'str',
"barycentric": 'i',
"pulsarcentric": 'i',
"az_start": 'd',
"za_start": 'd',
"src_raj": 'd',
"src_dej": 'd',
"tstart": 'd',
"tsamp": 'd',
"nbits": 'i',
"nsamples": 'i',
"nbeams": "i",
"ibeam": "i",
"fch1": 'd',
"foff": 'd',
"FREQUENCY_START": 'flag',
"fchannel": 'd',
"FREQUENCY_END": 'flag',
"nchans": 'i',
"nifs": 'i',
"refdm": 'd',
"period": 'd',
"npuls": 'q',
"nbins": 'i',
"HEADER_END": 'flag'}
def dec2radians(src_dej):
"""
dec2radians(src_dej):
Convert the SIGPROC-style DDMMSS.SSSS declination to radians
"""
sign = 1.0
if src_dej < 0: sign = -1.0
xx = math.fabs(src_dej)
dd = int(math.floor(xx / 10000.0))
mm = int(math.floor((xx - dd * 10000.0) / 100.0))
ss = xx - dd * 10000.0 - mm * 100.0
return sign * ARCSECTORAD * (60.0 * (60.0 * dd + mm) + ss)
def ra2radians(src_raj):
"""
ra2radians(src_raj):
Convert the SIGPROC-style HHMMSS.SSSS right ascension to radians
"""
return 15.0 * dec2radians(src_raj)
def read_doubleval(filfile, stdout=False):
dblval = struct.unpack('d', filfile.read(8))[0]
if stdout:
print " double value = '%20.15f'"%dblval
return dblval
def read_intval(filfile, stdout=False):
intval = struct.unpack('i', filfile.read(4))[0]
if stdout:
print " int value = '%d'"%intval
return intval
def read_longintval(filfile, stdout=False):
longintval = struct.unpack('q', filfile.read(8))[0]
if stdout:
print " long int value = '%d'"%longintval
return longintval
def read_string(filfile, stdout=False):
strlen = struct.unpack('i', filfile.read(4))[0]
strval = filfile.read(strlen)
if stdout:
print " string = '%s'"%strval
return strval
def read_paramname(filfile, stdout=False):
paramname = read_string(filfile, stdout=False)
if stdout:
print "Read '%s'"%paramname
return paramname
def read_hdr_val(filfile, stdout=False):
paramname = read_paramname(filfile, stdout)
if header_params[paramname] == 'd':
return paramname, read_doubleval(filfile, stdout)
elif header_params[paramname] == 'i':
return paramname, read_intval(filfile, stdout)
elif header_params[paramname] == 'q':
return paramname, read_longintval(filfile, stdout)
elif header_params[paramname] == 'str':
return paramname, read_string(filfile, stdout)
elif header_params[paramname] == 'flag':
return paramname, None
else:
print "Warning: key '%s' is unknown!" % paramname
return None, None
def prep_string(string):
return struct.pack('i', len(string))+string
def prep_double(name, value):
return prep_string(name)+struct.pack('d', float(value))
def prep_int(name, value):
return prep_string(name)+struct.pack('i', int(value))
def addto_hdr(paramname, value):
if header_params[paramname] == 'd':
return prep_double(paramname, value)
elif header_params[paramname] == 'i':
return prep_int(paramname, value)
elif header_params[paramname] == 'str':
return prep_string(paramname) + prep_string(value)
elif header_params[paramname] == 'flag':
return prep_string(paramname)
else:
warnings.warning("key '%s' is unknown!" % paramname)
return hdr
def read_header(infile):
"""
read_header(infile):
Read a SIGPROC-style header and return the keys/values in a dictionary,
as well as the length of the header: (hdrdict, hdrlen)
"""
hdrdict = {}
if type(infile) == type("abc"):
infile = open(infile)
param = ""
while (param != "HEADER_END"):
param, val = read_hdr_val(infile, stdout=False)
hdrdict[param] = val
hdrlen = infile.tell()
infile.close()
return hdrdict, hdrlen
def samples_per_file(infile, hdrdict, hdrlen):
"""
samples_per_file(infile, hdrdict, hdrlen):
Given an input SIGPROC-style filterbank file and a header
dictionary and length (as returned by read_header()),
return the number of (time-domain) samples in the file.
"""
numbytes = os.stat(infile)[6] - hdrlen
bytes_per_sample = hdrdict['nchans'] * (hdrdict['nbits']/8)
if numbytes % bytes_per_sample:
print "Warning!: File does not appear to be of the correct length!"
numsamples = numbytes / bytes_per_sample
return numsamples
if __name__ == "__main__":
if len(sys.argv)==1:
print "\nusage: mod_filterbank_hdr.py infile.fil [outfile.fil]\n"
sys.exit()
filhdr = {}
newhdr = ""
infile = open(sys.argv[1], 'rb')
# Loop over the values in the .fil file
while 1:
param, val = read_hdr_val(infile, stdout=False)
filhdr[param] = val
# Add lines here to correct stuff
#if param=="nchans": val = 768
# Append to the new hdr string
newhdr += addto_hdr(param, val)
# Break out of the loop if the header is over
if param=="HEADER_END": break
if len(sys.argv) > 2:
print "Writing new header to '%s'"%sys.argv[2]
outfile = open(sys.argv[2], 'wb')
outfile.write(newhdr)
outfile.close()
else:
print filhdr
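# Illustrative usage sketch (not part of the original module): read a header
# dictionary and report the number of time samples, assuming "example.fil" is
# a SIGPROC filterbank file on disk:
#
#     hdr, hdrlen = read_header("example.fil")
#     print samples_per_file("example.fil", hdr, hdrlen)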
|
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The WebDriver implementation."""
from command import Command
from ..common.exceptions import ErrorInResponseException
from ..common.exceptions import InvalidSwitchToTargetException
from ..common.exceptions import NoSuchElementException
from errorhandler import ErrorHandler
import logging
import utils
from webelement import WebElement
from remote_connection import RemoteConnection
class WebDriver(object):
"""Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol as defined
here:
http://code.google.com/p/selenium/wiki/JsonWireProtocol
Attributes:
command_executor - The command.CommandExecutor object used to execute
commands.
error_handler - errorhandler.ErrorHandler object used to verify that the
server did not return an error.
session_id - The session ID to send with every command.
capabilities - A dictionary of capabilities of the underlying browser for
this instance's session.
"""
def __init__(self, command_executor, browser_name, platform, version='',
javascript_enabled=True):
"""Create a new driver that will issue commands using the wire protocol.
Args:
command_executor - Either a command.CommandExecutor object or a string
that specifies the URL of a remote server to send commands to.
browser_name - A string indicating which browser to request a new
session for from the remote server. Should be one of
{mobile safari|firefox|internet explorer|htmlunit|chrome}.
platform - A string indicating the desired platform to request from
the remote server. Should be one of
{WINDOWS|XP|VISTA|MAC|LINUX|UNIX|ANY}.
version - A string indicating a specific browser version to request,
or an empty string to use any available browser. Defaults to the
empty string.
javascript_enabled - Whether the requested browser should support
JavaScript. Defaults to True.
"""
self.command_executor = command_executor
if type(self.command_executor) is str:
self.command_executor = RemoteConnection(command_executor)
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
self.start_session(browser_name=browser_name,
platform=platform,
version=version,
javascript_enabled=javascript_enabled)
@property
def name(self):
"""Returns the name of the underlying browser for this instance."""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""Called before starting a new session.
This method may be overridden to define custom startup behavior.
"""
pass
def stop_client(self):
"""Called after executing a quit command.
This method may be overridden to define custom shutdown behavior.
"""
pass
def start_session(self, browser_name, platform=None, version=None,
javascript_enabled=False):
"""Creates a new session with the desired capabilities.
Args:
browser_name: The name of the browser to request.
version: Which browser version to request.
platform: Which platform to request the browser on.
javascript_enabled: Whether the new session should support JavaScript.
"""
response = self._execute(Command.NEW_SESSION, {
'desiredCapabilities': {
'browserName': browser_name,
'platform': platform or 'ANY',
'version': version or '',
'javascriptEnabled': javascript_enabled
}
})
self.session_id = response['sessionId']
self.capabilities = response['value']
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, WebElement):
return {'ELEMENT': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
return WebElement(self, element_id)
def _unwrap_value(self, value):
if isinstance(value, dict) and 'ELEMENT' in value:
return self.create_web_element(value['ELEMENT'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def _execute(self, driver_command, params=None):
"""Sends a command to be executed by a command.CommandExecutor.
Args:
driver_command: The name of the command to execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None}
def get(self, url):
"""Loads a web page in the current browser."""
self._execute(Command.GET, {'url': url})
def get_title(self):
"""Gets the title of the current page."""
resp = self._execute(Command.GET_TITLE)
return resp['value']
def find_element_by_id(self, id_):
"""Finds element by id."""
return self._find_element_by("id", id_)
def find_elements_by_xpath(self, xpath):
"""Finds multiple elements by xpath."""
return self._find_elements_by("xpath", xpath)
def find_element_by_xpath(self, xpath):
"""Finds an element by xpath."""
return self._find_element_by("xpath", xpath)
def find_element_by_link_text(self, link_text):
"""Finds an element by its link text."""
return self._find_element_by("link text", link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds an element by a partial match of its link text."""
return self._find_element_by("partial link text", link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds elements by a partial match of their link text."""
return self._find_elements_by("partial link text", link_text)
def find_element_by_name(self, name):
"""Finds an element by its name."""
return self._find_element_by("name", name)
def find_elements_by_name(self, name):
"""Finds elements by their name."""
return self._find_elements_by("name", name)
def find_element_by_tag_name(self, name):
"""Finds an element by its tag name."""
return self._find_element_by("tag name", name)
def find_elements_by_tag_name(self, name):
"""Finds elements by their tag name."""
return self._find_elements_by("tag name", name)
def execute_script(self, script, *args):
# Script arguments are always sent as a list over the wire protocol.
converted_args = list(args)
return self._execute(
Command.EXECUTE_SCRIPT,
{'script': script, 'args':converted_args})['value']
def get_current_url(self):
"""Gets the current url."""
return self._execute(Command.GET_CURRENT_URL)['value']
def get_page_source(self):
"""Gets the page source."""
return self._execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""Closes the current window."""
self._execute(Command.CLOSE)
def quit(self):
"""Quits the driver and close every associated window."""
try:
self._execute(Command.QUIT)
finally:
self.stop_client()
def get_current_window_handle(self):
return self._execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
def get_window_handles(self):
return self._execute(Command.GET_WINDOW_HANDLES)['value']
def switch_to_active_element(self):
"""Returns the element with focus, or BODY if nothing has focus."""
return self._execute(Command.GET_ACTIVE_ELEMENT)['value']
def switch_to_window(self, window_name):
"""Switches focus to a window."""
self._execute(Command.SWITCH_TO_WINDOW, {'name': window_name})
def switch_to_frame(self, index_or_name):
"""Switches focus to a frame by index or name."""
self._execute(Command.SWITCH_TO_FRAME, {'id': index_or_name})
def back(self):
"""Goes back in browser history."""
self._execute(Command.GO_BACK)
def forward(self):
"""Goes forward in browser history."""
self._execute(Command.GO_FORWARD)
# Options
def get_cookies(self):
"""Gets all the cookies. Return a set of dicts."""
return self._execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""Get a single cookie. Returns the desired cookie dict or None."""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""Delete a cookie with the given name."""
self._execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""Delete all the cookies."""
self._execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
self._execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
def _find_element_by(self, by, value):
return self._execute(Command.FIND_ELEMENT,
{'using': by, 'value': value})['value']
def _find_elements_by(self, by, value):
return self._execute(Command.FIND_ELEMENTS,
{'using': by, 'value': value})['value']
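# Illustrative usage sketch (not part of the original module), assuming a
# remote server speaking this JSON wire protocol is listening at the URL below:
#
#     driver = WebDriver("http://localhost:4444/wd/hub", "firefox", "ANY")
#     driver.get("http://www.example.com")
#     print driver.get_title()
#     driver.quit()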
|
|
"""Creates beautiful visualizations of the publication database."""
import datetime
import numpy as np
from astropy import log
from matplotlib import pyplot as pl
import matplotlib.patheffects as path_effects
import matplotlib as mpl
from . import SCIENCES
# Configure the aesthetics
mpl.rcParams["figure.figsize"] = (10, 6)
mpl.rcParams["interactive"] = False
mpl.rcParams["lines.antialiased"] = True
# Patches
mpl.rcParams["patch.linewidth"] = 0.5
mpl.rcParams["patch.facecolor"] = "348ABD"
mpl.rcParams["patch.edgecolor"] = "eeeeee"
mpl.rcParams["patch.antialiased"] = True
# Font
mpl.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.size"] = 16
mpl.rcParams["font.sans-serif"] = "Open Sans"
mpl.rcParams["text.color"] = "333333"
# Axes
mpl.rcParams["axes.facecolor"] = "ecf0f1"
mpl.rcParams["axes.edgecolor"] = "bdc3c7"
mpl.rcParams["axes.linewidth"] = 1.0
mpl.rcParams["axes.grid"] = False
mpl.rcParams["axes.titlesize"] = "x-large"
mpl.rcParams["axes.labelsize"] = "x-large"
mpl.rcParams["axes.labelweight"] = "normal"
mpl.rcParams["axes.labelcolor"] = "333333"
mpl.rcParams["axes.axisbelow"] = True
mpl.rcParams["axes.unicode_minus"] = True
# Ticks
mpl.rcParams["xtick.color"] = "333333"
mpl.rcParams["ytick.color"] = "333333"
mpl.rcParams["xtick.major.size"] = 0
mpl.rcParams["ytick.major.size"] = 0
# Grid
mpl.rcParams["grid.color"] = "bdc3c7"
mpl.rcParams["grid.linestyle"] = "-"
mpl.rcParams["grid.linewidth"] = 1
def plot_by_year(db,
output_fn='kpub-publication-rate.pdf',
first_year=2009,
barwidth=0.75,
dpi=200,
extrapolate=True,
mission='both',
colors=["#3498db", "#27ae60", "#95a5a6"]):
"""Plots a bar chart showing the number of publications per year.
Parameters
----------
db : `PublicationDB` object
Data to plot.
output_fn : str
Output filename of the plot.
first_year : int
What year should the plot start?
barwidth : float
Aesthetics -- how wide are the bars?
dpi : float
Output resolution.
extrapolate : boolean
If `True`, extrapolate the publication count in the current year.
mission : str
'kepler', 'k2', or 'both'
colors : list of str
Define the facecolor for [kepler, k2, extrapolation]
"""
# Obtain the dictionary which provides the annual counts
current_year = datetime.datetime.now().year
counts = db.get_annual_publication_count(year_begin=first_year, year_end=current_year)
# Now make the actual plot
fig = pl.figure()
ax = fig.add_subplot(111)
if mission != 'k2':
pl.bar(np.array(list(counts['kepler'].keys())),
list(counts['kepler'].values()),
label='Kepler',
facecolor=colors[0],
width=barwidth)
if mission != 'kepler':
if mission == 'k2':
bottom = None
else:
bottom = list(counts['kepler'].values())
pl.bar(np.array(list(counts['k2'].keys())),
list(counts['k2'].values()),
bottom=bottom,
label='K2-Based Publications',
facecolor=colors[1],
width=barwidth)
# Also plot the extrapolated prediction for the current year
if extrapolate:
now = datetime.datetime.now()
fraction_of_year_passed = float(now.strftime("%-j")) / 365.2425
if mission == 'both':
current_total = (counts['kepler'][current_year] +
counts['k2'][current_year])
else:
current_total = counts[mission][current_year]
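# Scale the year-to-date count up to a full-year estimate: if, say, a quarter
# of the year has passed and current_total is 10, (1/0.25 - 1) * 10 = 30
# more publications are expected on top of the bar plotted so far.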
expected = (1/fraction_of_year_passed - 1) * current_total
pl.bar(current_year,
expected,
bottom=current_total,
label='Extrapolation',
facecolor=colors[2],
width=barwidth)
# Aesthetics
pl.ylabel("Publications per year")
ax.get_xaxis().get_major_formatter().set_useOffset(False)
pl.xticks(range(first_year - 1, current_year + 1))
pl.xlim([first_year - 0.75*barwidth, current_year + 0.75*barwidth])
pl.legend(bbox_to_anchor=(0.1, 1., 1., 0.),
loc=3,
ncol=3,
borderaxespad=0.,
handlelength=0.8,
frameon=False)
# Disable spines
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Only show bottom and left ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Only show horizontal grid lines
ax.grid(axis='y')
pl.tight_layout(rect=(0, 0, 1, 0.95), h_pad=1.5)
log.info("Writing {}".format(output_fn))
pl.savefig(output_fn, dpi=dpi)
pl.close()
def plot_science_piechart(db, output_fn="kpub-piechart.pdf", dpi=200):
"""Plots a piechart showing exoplanet vs astrophysics publications.
Parameters
----------
db : `PublicationDB` object
Data to plot.
output_fn : str
Output filename of the plot.
dpi : float
Output resolution.
"""
count = []
for science in SCIENCES:
cur = db.con.execute("SELECT COUNT(*) FROM pubs "
"WHERE science = ?;", [science])
rows = list(cur.fetchall())
count.append(rows[0][0])
# Plot the pie chart
patches, texts, autotexts = pl.pie(count,
colors=['#f39c12', '#18bc9c'],
autopct="%.0f%%",
startangle=90)
# Now take care of the aesthetics
for t in autotexts:
t.set_fontsize(32)
t.set_color("white")
t.set_path_effects([path_effects.Stroke(linewidth=2,
foreground='#333333'),
path_effects.Normal()])
pl.legend(handles=patches,
labels=["Exoplanets", "Astrophysics"],
fontsize=22,
bbox_to_anchor=(0.2, 1.05, 1., 0.),
loc=3,
ncol=2,
borderaxespad=0.,
handlelength=0.8,
frameon=False)
pl.axis('equal') # required to ensure pie chart has equal aspect ratio
pl.tight_layout(rect=(0, 0, 1, 0.85), h_pad=1.5)
log.info("Writing {}".format(output_fn))
pl.savefig(output_fn, dpi=dpi)
pl.close()
def plot_author_count(db,
output_fn='kpub-author-count.pdf',
first_year=2008,
dpi=200,
colors=["#3498db", "#27ae60", "#95a5a6"]):
"""Plots a line chart showing the number of authors over time.
Parameters
----------
db : `PublicationDB` object
Data to plot.
output_fn : str
Output filename of the plot.
first_year : int
What year should the plot start?
dpi : float
Output resolution.
colors : list of str
Define the facecolor for [kepler, k2, extrapolation]
"""
# Obtain the dictionary which provides the annual counts
current_year = datetime.datetime.now().year
# Now make the actual plot
fig = pl.figure()
ax = fig.add_subplot(111)
cumulative_years = []
paper_counts = []
author_counts, first_author_counts = [], []
k2_count, kepler_count = [], []
for year in range(first_year - 1, current_year):
cumulative_years.append(year)
metrics = db.get_metrics(cumulative_years)
paper_counts.append(metrics['publication_count'])
author_counts.append(metrics['author_count'])
first_author_counts.append(metrics['first_author_count'])
k2_count.append(metrics['k2_count'])
kepler_count.append(metrics['kepler_count'])
# +1 because the stats for all of e.g. 2018 should show at Jan 1, 2019
ax.plot([y+1 for y in cumulative_years], paper_counts, label="Kepler & K2 publications", lw=9)
#ax.plot(cumulative_years, author_counts, label="Unique authors", lw=6)
ax.plot([y+1 for y in cumulative_years], first_author_counts, label="Unique first authors", lw=3)
# Aesthetics
#pl.title("Kepler & K2's scientific output over time")
pl.ylabel("Cumulative count")
ax.get_xaxis().get_major_formatter().set_useOffset(False)
pl.xticks(range(first_year - 1, current_year + 1))
pl.xlim([first_year + 0.5, current_year + 0.5])
pl.ylim([0, 1.05*np.max(paper_counts)])
pl.legend(bbox_to_anchor=(0.03, 0.95, 0.95, 0.),
loc="upper left",
ncol=1,
borderaxespad=0.,
handlelength=1.5,
frameon=True)
# Disable spines
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Only show bottom and left ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Only show horizontal grid lines
ax.grid(axis='y')
pl.tight_layout(rect=(0, 0, 1, 0.98), h_pad=1.5)
log.info("Writing {}".format(output_fn))
pl.savefig(output_fn, dpi=dpi)
pl.close()
if __name__ == "__main__":
plot_by_year()
plot_science_piechart()
|
|
#
# Copyright 2010 Suinova Designs Ltd.
#
__author__ = "Ted Wen"
__version__ = "0.1"
import logging, re, os
from datetime import datetime
try:
from includes import * #assign global variables in this module
except:
logging.error('failed to load includes')
global_vars = {}
gTempVarPtn = re.compile(ur'{{\s*([\w\|:"/.?_$()]+)\s*}}',re.UNICODE)
gMatchPtn = re.compile(r'([|:?])')
realpath = os.path.dirname(__file__)
class Templite():
""" Simplistic template processing as an alternative to django template engine.
Syntax:
{{ var }} - replace var with its value, or do nothing if not found
Loops and conditions are not supported.
Supported: {{ varname }}, {{ varname|default:"" }}, {{ varname|date[time] }}, {{ include|subdir/file.html }}, {{ varname?true_var:false_var }}
Some global values can be saved in global_vars in file includes.py.
Other variables can be saved in a dict and passed as the vars argument to the constructor, e.g. t = Templite({'var1': 'value1'}, out).
"""
def __init__(self, vars=None, out=None):
self.vars = vars or {}
self.out = out
def render(self, text, vars=None, out=None):
""" Render the text by substituting the variables with values from the dict.
@param text: HTML text to parse
@param vars: dict containing the variables and values to put in the text for output
@param out: output stream
@return: True if successful
"""
if vars: self.vars = vars
if out: self.out = out
if self.out is None:
logging.error('templite.render_text() output stream is None')
raise Exception('No output')
if text is None or text == '':
logging.warning('templite.render_text() text parameter is empty')
return False
if text.find('{{') >= 0:
#gTempVarPtn = re.compile(ur'{{\s*([\w\|:"/.]+)\s*}}',re.UNICODE)
rs = gTempVarPtn.sub(self.get_value, text)
self.out.write(rs)
else:
self.out.write(text)
return True
def get_values(self,s):
""" Find and return value of key s in either self.vars or global_vars dictionary.
It returns None if not found. For global_vars, the result is parsed for {{ v }} again; be careful not to create an infinite loop.
"""
if s in self.vars:
return self.vars[s]
if s in global_vars:
vs = global_vars[s]
if vs.find('{{') >= 0:
#logging.info('---------- parsing %s'%vs)
return gTempVarPtn.sub(self.get_value, vs)
return vs
#logging.warning('!!!!!!! var "%s" not found in self.vars or global_vars'%s)
return None
def get_value(self,match):
""" Return the value from self.vars, or error if not found.
Supported: {{ varname }}, {{ varname|default:"" }}, {{ varname|date[time] }}, {{ include|subdir/file.html }}, {{ varname?true_var:false_var }}
"""
ps = gMatchPtn.split(match.group(1))
n = len(ps)
if n < 1:
logging.error('Syntax error: nothing in {{}}')
return '{{}}'
var = ps[0]
val = self.get_values(var)
if val is None:
if var == 'include': #['include', '|', 'web/file.inc']
return self.read_file(ps[2])
elif n > 4 and ps[2] == 'default': #['var1', '|', 'default', ':', '".."']
return ps[4].replace('"','')
elif len(ps)>1 and ps[1] == '?':
if n > 4:
val = self.get_values(ps[4].strip())
else:
val = ''
n = 1
else:
rs = '{{ "%s" }} not found'%var
logging.error(rs)
return rs
if n > 2:
op = ps[1]
if op == '?' and n > 2: #['var', '?', 'true_s', ':', 'false_s'] the value of var must be bool type, :false_s is optional
if not isinstance(val, bool):
logging.error('var is not bool in %s'%match.group(1))
return match.group(1)
if val:
val = self.get_values(ps[2].strip())
#logging.info('!!!!!!! getting value %s=%s'%(ps[2],val))
elif n > 4:
val = self.get_values(ps[4].strip())
else:
val = ''
if val is None:
return match.group(1)
elif op == '|': #['var', '|', 'date[time]']
r = ps[2]
if r == 'datetime':
val = datetime.strftime(val,'%Y-%m-%d %H:%M:%S')
elif r == 'date':
val = datetime.strftime(val,'%Y-%m-%d')
else:
logging.error('unknown op in %s'%match.group(1))
return match.group(1)
if isinstance(val,basestring):
return val
return jsonize(val)
def read_file(self,filename):
logging.info('read_file(%s)'%filename)
if filename.find('$(')>=0:
filename = re.sub(r'\$\((\w+)\)',lambda m: self.get_values(m.group(1)),filename)
logging.info('\treplaced: %s'%filename)
if filename.startswith('/'):
sp = '%s%s'
else:
sp = '%s/%s'
fi = open(sp % (realpath,filename))
txt = fi.read()
fi.close()
return txt
def render_text(text, vars, out):
""" Simple interface for Templite.render.
@param text: HTML content text
@param vars: {'var':value,...} where value is normally a string, but can be an object.
@param out: output stream to send rendered text
"""
#for k,v in vars.items():
# logging.info('k,v=%s,%s'%(k,v))
t = Templite(vars, out)
return t.render(text)
def render_file(file, vars, out):
""" Read file content and calls render_text. """
fi = open(file)
txt = fi.read()
fi.close()
return render_text(txt, vars, out)
class Jsonizer():
""" JSON rendering class to make a JSON-format string out of a Python object.
Supported object types include dict, list and embedded recursively.
Output is UTF-8 encoded.
Special case: if a string value starts with { or [, it is treated as an embedded JSON string and is not quoted.
"""
def __init__(self):
self.buf = []
def jsonize(self, data):
self.buf = []
self.make_data(data)
return ''.join(self.buf)
def make_data(self, data):
if isinstance(data, dict):
self.make_dict(data)
elif isinstance(data, list):
self.make_list(data)
elif isinstance(data, basestring):
if data.find('"') >= 0: data = data.replace('"',"'")
if data.find('\r\n') >= 0: data = data.replace('\r\n','<br/>')
if data.find('\n') >= 0: data = data.replace('\n','<br/>')
if isinstance(data, unicode):
data = data.encode('utf-8')
if data.startswith('{') or data.startswith('['):
self.buf.append('%s' % data)
else:
self.buf.append('"%s"' % data)
else:
self.buf.append('%s' % data) #numbers
def make_dict(self, data):
self.buf.append('{')
count = 0
for d in data.items():
if count == 0:
count += 1
else:
self.buf.append(',')
self.buf.append('"%s":' % d[0]) #key
self.make_data(d[1]) #value
self.buf.append('}')
def make_list(self, data):
self.buf.append('[')
count = 0
for d in data:
if count == 0:
count += 1
else:
self.buf.append(',')
self.make_data(d)
self.buf.append(']')
def jsonize(data):
""" Wrap Jsonizer.jsonize into a simple method.
@param data: dict or array object to render into a JSON string.
@return JSON-format string.
"""
json = Jsonizer()
return json.jsonize(data)
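# Illustrative usage sketch (not part of the original module): render a small
# template to stdout and JSON-encode a dict. The default filter fills in
# missing variables and the ?: form picks a value based on a boolean variable.
#
#     import sys
#     render_text('Hello {{ name|default:"world" }}: {{ flag?yes_text:no_text }}',
#                 {'flag': True, 'yes_text': 'ON', 'no_text': 'OFF'}, sys.stdout)
#     print jsonize({'a': 1, 'items': ['x', 'y']})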
|
|
from django.conf import settings
from django.conf.urls import url, patterns, include
from django.views.generic import TemplateView, RedirectView
from django.contrib import admin
from django.conf.urls.static import static
from threadedcomments.models import ThreadedComment
admin.autodiscover()
from audiotracks.models import get_track_model
Track = get_track_model()
from microblogging.feeds import TweetFeedAll, TweetFeedUser
from microblogging.feeds import TweetFeedUserWithFriends
from microblogging.models import Tweet
from photos.models import Image
from tagging.models import TaggedItem
from account.openid_consumer import PinaxConsumer
from blog.feeds import BlogFeedAll, BlogFeedUser
from blog.models import Post
from blog.forms import BlogForm
from smeuhoverride import feeds
handler500 = "pinax.views.server_error"
tweets_feed_dict = {"feed_dict": {
"all": TweetFeedAll,
"only": TweetFeedUser,
"with_friends": TweetFeedUserWithFriends,
}}
blogs_feed_dict = {"feed_dict": {
"all": BlogFeedAll,
"only": BlogFeedUser,
}}
urlpatterns = patterns(
"",
url(r"^favicon.ico/?$", RedirectView.as_view(
url=settings.STATIC_URL + 'img/favicon.ico',
permanent=True)),
url(r"^$", "timeline.views.home", name="home"),
url(r"5c/$", "timeline.views.legacy",),
url(r"^admin/", include(admin.site.urls)),
url(r"^about/", include("about.urls")),
url(r"^account/", include("account.urls")),
url(r"^openid/(.*)", PinaxConsumer()),
url(r"^profiles/", include("profiles.urls")),
# Blog URLs ############################################
# all blog posts
url(r"^blogs/?$", "blog.views.blogs",
name="blog_list_all"),
url(r"^(?P<username>[\w\._-]+)/blog/feed/?$", feeds.UserBlogPosts(),
name="user_blog_feed"),
# blog post
url(r"^(?P<username>[-\w]+)/blog/(?P<slug>[-\w]+)/source/?$",
"smeuhoverride.views.blog_post_source", name="blog_post_source"),
url(r"^(?P<username>[-\w]+)/blog/(?P<slug>[-\w]+)/?$",
"blog.views.post", name="blog_post"),
# blog post for user
url(r"^(?P<username>\w+)/blog/?$",
"smeuhoverride.views.user_blog_index", name="blog_list_user"),
# your posts
url(r"^blogs/your_posts/?$",
"blog.views.your_posts", name="blog_list_yours"),
# new blog post
url(r"^blogs/new/$", "blog.views.new", name="blog_new"),
# edit blog post
url(r"^blogs/edit/(\d+)/$",
"blog.views.edit", name="blog_edit"),
    # destroy blog post
url(r"^blogs/destroy/(\d+)/$",
"blog.views.destroy", name="blog_destroy"),
# ajax validation
(r"^blogs/validate/$", "ajax_validation.views.validate", {
"form_class": BlogForm,
"callback": lambda request, *args, **kwargs: {"user": request.user}
}, "blog_form_validate"),
# /END Blog URLs #######################################
url(r"^invitations/", include("friends_app.urls")),
url(r"^notices/", include("notification.urls")),
url(r"^messages/", include("messages.urls")),
url(r"^touites/", include("microblogging.urls")),
url(r"^comments/", include("threadedcomments.urls")),
url(r"^i18n/", include("django.conf.urls.i18n")),
url(r"^photos/", include("photos.urls")),
url(r"^avatar/", include("avatar.urls")),
url(r"^fu/", include("fukung.urls")),
url(r"^timeline/", include("timeline.urls")),
url(r"^artist/", include("artist.urls")),
# Feeds urls
url(r"^feeds/touites/(?P<username>[\w\._-]+)/with_friends/?$",
feeds.UserTweetWithFriends(
), name="user_friends_tweets"),
url(r"^feeds/touites/(?P<username>[\w\._-]+)/?$", feeds.UserTweet(),
name="user_tweets"),
url(r"^feeds/touites/?$",
feeds.AllTweet(), name="all_tweets_feed"),
url(r"^feeds/photos/?$",
feeds.AllPhotos(), name="all_photos_feed"),
url(r"^feeds/comments/?$",
feeds.AllComments(), name="all_comments_feed"),
url(r"^feeds/blogs/?$", feeds.AllBlogPosts(),
name="all_blogs_feed"),
)
# @@@ for now, we'll use friends_app to glue this stuff together
friends_photos_kwargs = {
"template_name": "photos/friends_photos.html",
"friends_objects_function": lambda users: Image.objects.filter(member__in=users).order_by("-date_added"),
}
friends_blogs_kwargs = {
"template_name": "blog/friends_posts.html",
"friends_objects_function": lambda users: Post.objects.filter(author__in=users),
}
friends_tweets_kwargs = {
"template_name": "microblogging/friends_tweets.html",
"friends_objects_function": lambda users: Tweet.objects.filter(sender_id__in=[user.id for user in users], sender_type__name="user"),
}
urlpatterns += patterns(
"",
url(r"^photos/friends_photos/$", "friends_app.views.friends_objects",
kwargs=friends_photos_kwargs, name="friends_photos"),
url(r"^blog/friends_blogs/$", "friends_app.views.friends_objects",
kwargs=friends_blogs_kwargs, name="friends_blogs"),
url(r"^touites/friends_tweets/$", "friends_app.views.friends_objects",
kwargs=friends_tweets_kwargs, name="friends_tweets"),
)
tagged_models = (
dict(title="Tweets",
query=lambda tag: TaggedItem.objects.get_by_model(
Tweet, tag),
content_template="pinax_tagging_ext/tweets.html",
),
dict(title="Comments",
query=lambda tag: TaggedItem.objects.get_by_model(
ThreadedComment , tag),
content_template="pinax_tagging_ext/comments.html",
),
dict(title="Blog Posts",
query=lambda tag: TaggedItem.objects.get_by_model(
Post, tag).filter(status=2),
content_template="pinax_tagging_ext/blogs.html",
),
dict(title="Photos",
query=lambda tag: TaggedItem.objects.get_by_model(
Image, tag).filter(safetylevel=1),
content_template="pinax_tagging_ext/photos.html",
),
dict(title="Audio Tracks",
query=lambda tag: TaggedItem.objects.get_by_model(Track, tag),
content_template="pinax_tagging_ext/audiotracks.html",
),
)
tagging_ext_kwargs = {
"tagged_models": tagged_models,
}
urlpatterns += patterns(
"",
#url(r"^tags/(?P<tag>.+)/(?P<model>.+)$", "tagging_ext.views.tag_by_model",
# kwargs=tagging_ext_kwargs, name="tagging_ext_tag_by_model"),
#url(r"^tags/(?P<tag>.+)/$", "tagging_ext.views.tag",
# kwargs=tagging_ext_kwargs, name="tagging_ext_tag"),
url(r"^tags/(?P<tagname>.+)/$", "timeline.views.tag_home", name="tag_homepage"),
url(r"^tags/$", "smeuhoverride.views.tag_index",
kwargs={'limit': 1000}, name="tagging_ext_index"),
)
urlpatterns += patterns(
"",
    url(r"^(?P<username>[\w\._-]+)/music", include(
        "audiotracks.urls"), name="user_track"),
    url(r"^music", include("audiotracks.urls"))
)
urlpatterns += patterns(
"",
#url(r"^(?P<username>[\w\._-]+)/$",
# "profiles.views.profile", name="profile_detail"),
url(r"^(?P<username>[\w\._-]+)/$",
"timeline.views.user_home", name="profile_detail"),
)
if settings.SERVE_MEDIA:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
|
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.shortcuts import render
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Callable, Dict, List, Optional, Set, Text, \
Tuple, Type, Union
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render(request,
'analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True]
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True, False]
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message', 'huddle_message']
labels = ['Public streams', 'Private streams', 'Private messages', 'Group private messages']
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = [True, True]
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
# these are further re-written by client_label_map
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
labels_sort_function = sort_client_labels
include_empty_subgroups = [False, False]
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
totals = []
for label, values in value_arrays.items():
totals.append((label, sum(values)))
    # Sort by total (descending), breaking ties by label; comparing the values as
    # formatted strings would order the numeric totals lexicographically.
    totals.sort(key=lambda label_total: (label_total[1], label_total[0]), reverse=True)
return [label for label, total in totals]
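# Illustrative example (hypothetical data):
#   >>> sort_by_totals({'human': [1, 2], 'bot': [5]})
#   ['bot', 'human']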
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
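# Worked example (hypothetical data, for illustration): with
#   realm_order = ['website', 'ZulipMobile'] and user_order = ['ZulipMobile', 'website'],
# the realm pass assigns website -> 0 and ZulipMobile -> 1; the user pass then takes
# ZulipMobile -> min(0 - .1, 1) = -0.1 and website -> min(1 - .1, 0) = 0, so the final
# order is ['ZulipMobile', 'website']: clients the user actually uses float upward.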
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise AssertionError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "New iOS app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
    # Clients in the dev environment's autogenerated data start with _ so
    # that they are easy to drop manually without affecting other data.
if settings.DEVELOPMENT and name.startswith("_"):
return name[1:]
return name
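# A couple of illustrative mappings that follow from the rules above:
#   >>> client_label_map('ZulipGitHubWebhook')
#   'GitHub webhook'
#   >>> client_label_map('website')
#   'Website'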
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
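# For example (illustrative data): {'website': [1, 2], 'Website': [3, 4]} collapses to
# {'Website': [4, 6]}, since client_label_map('website') == 'Website' and counts for
# labels that map to the same name are summed element-wise.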
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
if len(subgroups) != len(labels):
raise AssertionError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
    # type: (connection.cursor) -> List[Dict[str, Any]]
    "Return all rows from a cursor as a list of dicts."
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
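# Illustrative result shape (hypothetical columns and values): for a query selecting
# string_id and cnt this returns something like
#   [{'string_id': 'zulip', 'cnt': 42}, {'string_id': 'lear', 'cnt': 7}]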
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title, is_home=True),
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    #: The `Any` used above should really be `Union[int, datetime]`.
    #: However, the current version of `Union` does not work inside the nested functions.
    #: We could use something like:
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
    #: but that would require carrying this long `Union` throughout the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = timezone.now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render(
request,
'analytics/activity.html',
context=dict(data=data, realm_link=realm_link, title=title),
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title),
)
|
|
from rest_framework import status, viewsets, exceptions, generics
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.decorators import action, api_view
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.response import Response
from rest_framework.settings import api_settings
from . import serializers, permissions, models, filters, pagination
import scraper
class GetObjectAllMixin():
"""
    A mixin that overrides the get_object method. Useful when we want a
    filtered get_queryset but also want get_object to return 401
    (unauthorized) and 403 (forbidden) errors, not just 404 (not found).
"""
# Must provide a model class
model_class = None
def get_object(self):
"""
        Override rest_framework.generics.GenericAPIView's get_object method,
        which uses self.get_queryset(). That would result in 404 errors rather
        than 401 or 403 errors when the object exists but the user doesn't have
        permission.
"""
# Here's the only change: objects.all() instead of self.get_queryset()
assert self.model_class is not None, (
"'%s' did not set model_class"
% self.__class__.__name__
)
queryset = self.filter_queryset(self.model_class.objects.all())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = generics.get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
class CreateWithUserMixin():
# Based on rest_framework's CreateModelMixin but adds user from the request
def create(self, request, *args, **kwargs):
data = request.data
data['user'] = request.user.pk
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
serializer.data,
status=status.HTTP_201_CREATED,
headers=headers)
def perform_create(self, serializer):
serializer.save()
def get_success_headers(self, data):
try:
return {'Location': str(data[api_settings.URL_FIELD_NAME])}
except (TypeError, KeyError):
return {}
class GroceryItemViewSet(CreateWithUserMixin, GetObjectAllMixin,
viewsets.ModelViewSet):
model_class = models.GroceryItem
serializer_class = serializers.GroceryItemSerializer
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ('id', 'name',)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
permissions.IsOwnerOrReadOnly,)
ordering_fields = ('name', 'group__name')
def get_queryset(self):
user = self.request.user
return self.model_class.filter_user_and_None(user)
class GroceryGroupViewSet(CreateWithUserMixin, GetObjectAllMixin,
viewsets.ModelViewSet):
model_class = models.GroceryGroup
serializer_class = serializers.GroceryGroupSerializer
filter_class = filters.GroceryGroupFilter
filter_backends = (filters.DjangoFilterBackend,)
search_fields = ('name', 'id',)
pagination_class = pagination.CustomPagination
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
permissions.IsOwnerOrReadOnly,)
def get_queryset(self):
user = self.request.user
return self.model_class.filter_user_and_None(user)
class SourceViewSet(viewsets.ReadOnlyModelViewSet):
queryset = models.Source.objects.all()
serializer_class = serializers.SourceSerializer
pagination_class = pagination.CustomPagination
class BookViewSet(CreateWithUserMixin, GetObjectAllMixin,
viewsets.ModelViewSet):
model_class = models.Book
serializer_class = serializers.BookSerializer
pagination_class = pagination.CustomPagination
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
permissions.IsOwnerOrReadOnly,)
def get_queryset(self):
user = self.request.user
return self.model_class.filter_user(user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
queryset = models.User.objects.all()
serializer_class = serializers.UserSerializer
pagination_class = pagination.CustomPagination
class RecipeViewSet(CreateWithUserMixin, GetObjectAllMixin,
viewsets.ModelViewSet):
model_class = models.Recipe
serializer_class = serializers.RecipeSerializer
pagination_class = pagination.CustomPagination
filter_class = filters.RecipeFilter
filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend)
search_fields = ('title',)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
permissions.IsOwnerOrIsPublic,
permissions.IsOwnerOrReadOnly,)
def get_queryset(self):
user = self.request.user
return self.model_class.filter_user_and_public(user)
class AuthViewSet(viewsets.ViewSet):
serializer_class = serializers.AuthSerializer
permission_classes = [
permissions.AllowAny
]
def _get_response_data(self, user, token=None):
serializer = self.serializer_class(user)
data = {**serializer.data}
if token:
data['token'] = token.key
return data
def _get_username_from_email(self, data):
        # Need a username; if an email was supplied, use that
if data.get('username') is None:
data['username'] = data.get('email')
@action(methods=['post'], detail=False)
def verify(self, request):
user = request.user
if user and user.is_authenticated:
token = Token.objects.get(user=user)
data = self._get_response_data(user, token)
return Response(data)
raise PermissionDenied()
@action(methods=['post'], detail=False)
def signup(self, request):
self._get_username_from_email(request.data)
serializer = self.serializer_class(
data=request.data, context={'request': request}
)
serializer.is_valid(raise_exception=True)
user = serializer.save()
token, created = Token.objects.get_or_create(user=user)
data = self._get_response_data(user, token)
return Response(data)
@action(methods=['post'], detail=False)
def login(self, request):
self._get_username_from_email(request.data)
serializer = AuthTokenSerializer(
data=request.data, context={'request': request}
)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
data = self._get_response_data(user, token)
return Response(data)
class TagViewSet(CreateWithUserMixin, GetObjectAllMixin,
viewsets.ModelViewSet):
model_class = models.Tag
serializer_class = serializers.TagSerializer
pagination_class = pagination.CustomPagination
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
permissions.IsOwnerOrReadOnly,)
def get_queryset(self):
user = self.request.user
return self.model_class.filter_user(user)
@api_view(['GET'])
def scrape_view(request):
"""
    Handles submission and asyncValidate for step 1 of the create-recipe wizard.
Args:
request: A GET request with a 'url' query param.
Returns:
        An HTTP response with a JSON body of scraped data
{
"title": String, "source": Number, "domain_name": String,
"url": String, "serves": Number, "ingredients": Object[],
"preparation": String[]
}
Raises:
ParseError
"""
url = request.query_params.get('url', None)
if url is None:
        raise ParseError({"url": "URL required, or choose manual entry."})
try:
dict_ = scraper.scrape(url)
source_id = models.Source.get_id_from_domain_name(dict_['domain'])
dict_['source'] = source_id
return Response(dict_)
except (
scraper.exceptions.InvalidURLError,
scraper.exceptions.URLError,
scraper.exceptions.WebsiteNotImplementedError
) as err:
raise ParseError(
{"url": err}
)
except (scraper.exceptions.RequestException) as err:
raise ParseError(
err
)
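# Illustrative request/response for this endpoint (the exact URL it is mounted at is
# defined in the project's urls.py and is assumed here):
#   GET <scrape-endpoint>?url=https://example.com/some-recipe
#     -> 200 with the scraped JSON body described in the docstring above
#   GET <scrape-endpoint>                      (missing the 'url' query param)
#     -> 400 {"url": "URL required, or choose manual entry."}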
@api_view(['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS', 'HEAD'])
def not_found(request):
raise exceptions.NotFound()
|
|
#!/usr/bin/env python
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
import logging
import os.path
import webapp2
from webapp2_extras import auth
from webapp2_extras import sessions
from webapp2_extras.auth import InvalidAuthIdError
from webapp2_extras.auth import InvalidPasswordError
def user_required(handler):
"""
Decorator that checks if there's a user associated with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
auth = self.auth
if not auth.get_user_by_session():
self.redirect(self.uri_for('login'), abort=True)
else:
return handler(self, *args, **kwargs)
return check_login
class BaseHandler(webapp2.RequestHandler):
@webapp2.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
self.render_template('home.html')
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, verified=False)
if not user_data[0]: #user_data is a tuple
            self.display_message('Unable to create user %s because of \
                duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
# TODO: Send verification link in email to user
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
msg = 'Verify your account with the following link: ' \
' <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
        msg = 'Send an email to the user so they can reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
        # unfortunately the auth interface does not (yet) allow manipulating
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
            # remove the signup token; we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
        # remove the signup token; we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
save_session=True)
self.redirect(self.uri_for('home'))
except (InvalidAuthIdError, InvalidPasswordError) as e:
logging.info('Login failed for user %s because of %s', username, type(e))
self._serve_page(True)
def _serve_page(self, failed=False):
username = self.request.get('username')
params = {
'username': username,
'failed': failed
}
self.render_template('login.html', params)
class LogoutHandler(BaseHandler):
def get(self):
self.auth.unset_session()
self.redirect(self.uri_for('home'))
class AuthenticatedHandler(BaseHandler):
@user_required
def get(self):
self.render_template('authenticated.html')
config = {
'webapp2_extras.auth': {
'user_model': 'models.User',
'user_attributes': ['name']
},
'webapp2_extras.sessions': {
'secret_key': 'YOUR_SECRET_KEY'
}
}
app = webapp2.WSGIApplication([
webapp2.Route('/', MainHandler, name='home'),
webapp2.Route('/signup', SignupHandler),
webapp2.Route('/<type:v|p>/<user_id:\d+>-<signup_token:.+>',
handler=VerificationHandler, name='verification'),
webapp2.Route('/password', SetPasswordHandler),
webapp2.Route('/login', LoginHandler, name='login'),
webapp2.Route('/logout', LogoutHandler, name='logout'),
webapp2.Route('/forgot', ForgotPasswordHandler, name='forgot'),
webapp2.Route('/authenticated', AuthenticatedHandler, name='authenticated')
], debug=True, config=config)
logging.getLogger().setLevel(logging.DEBUG)
|
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum.util import bfh, bh2u
from electrum.bitcoin import (is_segwit_address, b58_address_to_hash160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Electrum and %s encryption and decryption are currently incompatible') % self.device)
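        # NOTE: the unconditional raise above deliberately disables decryption for now,
        # so the code below is unreachable until Electrum/device compatibility is restored.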
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
payload = base64.b64decode(message)
nonce, message, msg_hmac = payload[:33], payload[33:-8], payload[-8:]
result = client.decrypt_message(address_n, nonce, message, msg_hmac)
return result.message
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
        # Client interaction must never happen in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
            # Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
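                # for the recognized change output, pass the BIP32 path
                # (address_n) rather than the bare address so the device can
                # recognize it as its own change (and typically skip asking the
                # user to confirm it)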
addrtype, hash_160 = b58_address_to_hash160(address)
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
|
"""
vcfexplorer.api.resources
VCF Explorer api resources
"""
from flask_restful import Api, Resource, abort, reqparse
import pymongo
from ..helpers import get_mongodb
## Common argument parsing
common_reqparse = reqparse.RequestParser()
common_reqparse.add_argument('limit', type = int, default = 1)
common_reqparse.add_argument('offset', type = int, default = 0)
common_reqparse.add_argument('sort_field', type = str, default = '')
common_reqparse.add_argument('sort_order', type = int, default = 1)
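# Shared paging/sorting parameters for the list endpoints below; e.g. a
# (hypothetical) request such as
#   GET /samples?limit=10&offset=20&sort_field=upload_date&sort_order=-1
# would page and sort the results. The actual routes are registered elsewhere
# in the application.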
class VCFs(Resource):
def get(self):
"""
Return all vcf datasets
"""
db = get_mongodb()
vcfs = db.vcfs.find()
if vcfs.count():
return vcfs
else:
abort(404)
class VCF(Resource):
def get(self, vcf_name):
"""
Return run metadata from vcfs collection
"""
db = get_mongodb()
vcf = db.vcfs.find_one({'name':vcf_name})
if vcf:
return vcf
else:
abort(404)
class VCFVariants(Resource):
def __init__(self):
"""
Setup argument parsing
"""
self.reqparse = common_reqparse.copy()
self.reqparse.add_argument('filtered_vars', type = bool, default = False)
super(VCFVariants, self).__init__()
def get(self, vcf_name):
"""
Return all variants from a run
"""
args = self.reqparse.parse_args()
db = get_mongodb()
vcf = db.vcfs.find_one({'name':vcf_name})
if vcf:
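            # Match on vcf_id both before and after $unwind: the first $match
            # narrows whole variant documents (and can use an index), while the
            # second keeps only the unwound sample entries belonging to this VCF.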
pipeline = [
{"$match": {"samples.vcf_id": vcf['_id']}},
{"$unwind": "$samples"},
{"$match": {"samples.vcf_id": vcf['_id']}},
]
if not args['filtered_vars']:
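                # by default keep only unfiltered calls, i.e. sample entries
                # that have no 'filter' value set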
pipeline.extend([
{"$match": {"samples.filter": {"$exists": False}}}
])
pipeline.extend([
{"$group": {
"_id":"$_id",
"samples": {"$push":"$samples"},
"chr": {"$first":"$chr"},
"pos": {"$first":"$pos"},
"ref": {"$first":"$ref"},
"alt": {"$first":"$alt"},
#"filter": {"$first":"$samples.filter"},
#"total_ac": {"$first":"$total_ac"},
#"alt_ac": {"$first":"$alt_ac"},
}
}
])
vcf_variants = db.variants.aggregate(pipeline, allowDiskUse=True)
if vcf_variants.alive:
return vcf_variants
else:
abort(404)
else: # if vcf
abort(404)
class Samples(Resource):
def __init__(self):
"""
Setup argument parsing
"""
self.reqparse = common_reqparse.copy()
super(Samples, self).__init__()
def get(self):
"""
Return all samples
"""
args = self.reqparse.parse_args()
db = get_mongodb()
pipeline = [
{"$unwind": "$samples"}
]
if args['sort_field']:
pipeline.append({"$sort": {args['sort_field']: args['sort_order']} })
pipeline.extend([
{"$skip": args['offset']},
{"$limit": args['limit']},
{"$group": {
"_id": "$samples",
"vcf_files": {"$push": "$vcf_file"},
"upload_date": {"$last": "$upload_date"}
}
}
])
print pipeline
samples = db.vcfs.aggregate(pipeline)
if samples.alive:
return samples
else:
abort(404)
class Sample(Resource):
def get(self, sample_name):
"""
Return sample metadata from run collection?
"""
db = get_mongodb()
vcfs = db.vcfs.find({'samples':sample_name})
if vcfs:
vcfs = [vcf for vcf in vcfs]
return {'sample':sample_name, 'vcfs': vcfs}
else:
abort(404)
class SampleVariants(Resource):
def __init__(self):
"""
Setup argument parsing
"""
self.reqparse = common_reqparse.copy()
self.reqparse.add_argument('filtered_vars', type = bool, default = False)
super(SampleVariants, self).__init__()
def get(self, sample_name):
"""
Return all variants from a sample
"""
args = self.reqparse.parse_args()
db = get_mongodb()
print sample_name
db_filter = {'samples.sample':sample_name}
if not args['filtered_vars']:
db_filter['samples.filter'] = {'$exists': False}
db_projection = {
'chr': 1, 'pos': 1, 'ref': 1, 'alt': 1,
#'total_ac': 1, 'alt_ac': 1,
'samples': { '$elemMatch': {'sample':sample_name} }
}
sample_variants = db.variants.find(db_filter, db_projection)
if sample_variants.count():
return sample_variants
else:
abort(404)
class Variants(Resource):
def __init__(self):
"""
Setup argument parsing
"""
self.reqparse = common_reqparse.copy()
super(Variants, self).__init__()
def get(self):
"""
Return all variants
"""
args = self.reqparse.parse_args()
db = get_mongodb()
db_projection = {
'samples' : 0
}
variants = db.variants.find(projection=db_projection, skip=args['offset'], limit=args['limit'])
## Sorting
if args['sort_field']:
variants = variants.sort(args['sort_field'],args['sort_order'])
if variants.count():
return [variants, variants.count()]
else:
abort(404)
class Variant(Resource):
def get(self, variant_id):
"""
Return a variant
"""
db = get_mongodb()
db_projection = {
'samples' : 0
}
variant = db.variants.find_one({'_id':variant_id}, db_projection)
if variant:
return variant
else:
abort(404)
class Root(Resource):
def get(self):
"""
Return basic database information
"""
db = get_mongodb()
return {
'db': str(db),
'mongodb version': db.command("serverStatus")['version'],
'uptime':db.command("serverStatus")['uptime'],
}
|
|
# -*- coding: utf-8 -*-
"""
1) Ambiguity / num names
2) independence of annotations
3) continuous
4) exponential case
5) specific examples of our prob
6) human in loop
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six # NOQA
import utool as ut
import numpy as np
from six.moves import zip
from ibeis.algo.hots import pgm_ext
print, rrr, profile = ut.inject2(__name__)
#SPECIAL_BASIS_POOL = ['fred', 'sue', 'tom']
SPECIAL_BASIS_POOL = []
#'fred', 'sue', 'tom']
# Quickly change names to be consistent with papers. Sorry, person reading the
# code; this will be confusing and inconsistent.
NAME_TTYPE = 'name'
MATCH_TTYPE = 'same'
SCORE_TTYPE = 'evidence_match'
def temp_model(num_annots, num_names, score_evidence=[], name_evidence=[],
other_evidence={}, noquery=False, verbose=None,
**kwargs):
if verbose is None:
verbose = ut.VERBOSE
method = kwargs.pop('method', None)
model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)
if verbose:
model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])
model, evidence, soft_evidence = update_model_evidence(
model, name_evidence, score_evidence, other_evidence)
if verbose and len(soft_evidence) != 0:
model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
title='Soft Evidence', color='green')
#if verbose:
# ut.colorprint('\n --- Soft Evidence ---', 'white')
# for ttype, cpds in model.ttype2_cpds.items():
# if ttype != MATCH_TTYPE:
# for fs_ in ut.ichunks(cpds, 4):
# ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
# 'green')
if verbose:
ut.colorprint('\n --- Inference ---', 'red')
if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
evidence = model._ensure_internal_evidence(evidence)
query_vars = []
query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')
#query_vars += ut.list_getattr(model.ttype2_cpds[MATCH_TTYPE], 'variable')
query_vars = ut.setdiff(query_vars, evidence.keys())
#query_vars = ut.setdiff(query_vars, soft_evidence.keys())
query_results = cluster_query(model, query_vars, evidence,
soft_evidence, method)
else:
query_results = {}
    factor_list = query_results.get('factor_list', [])
if verbose:
if verbose:
print('+--------')
semtypes = [model.var2_cpd[f.variables[0]].ttype
for f in factor_list]
for type_, factors in ut.group_items(factor_list, semtypes).items():
print('Result Factors (%r)' % (type_,))
factors = ut.sortedby(factors, [f.variables[0] for f in factors])
for fs_ in ut.ichunks(factors, 4):
ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
'yellow')
print('MAP assignments')
top_assignments = query_results.get('top_assignments', [])
tmp = []
for lbl, val in top_assignments:
tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
print(ut.align('\n'.join(tmp), ' :'))
print('L_____\n')
showkw = dict(evidence=evidence,
soft_evidence=soft_evidence,
**query_results)
from ibeis.algo.hots import pgm_viz
pgm_viz.show_model(model, **showkw)
return (model, evidence, query_results)
# pgm_ext.print_ascii_graph(model)
def make_name_model(num_annots, num_names=None, verbose=True, mode=1,
num_scores=2, p_score_given_same=None,
hack_score_only=False, score_basis=None,
special_names=None):
r"""
CommandLine:
python -m ibeis.algo.hots.bayes --exec-make_name_model --no-cnn
python -m ibeis.algo.hots.bayes --exec-make_name_model --show --no-cnn
python -m ibeis.algo.hots.bayes --exec-make_name_model --num-annots=3
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.bayes import * # NOQA
>>> defaults = dict(num_annots=2, num_names=2, verbose=True)
>>> modeltype = ut.get_argval('--modeltype', default='bayes')
>>> kw = ut.argparse_funckw(make_name_model, defaults)
>>> model = make_name_model(**kw)
>>> ut.quit_if_noshow()
>>> model.show_model(show_prior=False, show_title=False, modeltype=modeltype)
>>> ut.show_if_requested()
"""
if special_names is None:
special_names = SPECIAL_BASIS_POOL
    assert mode == 1, 'can only do mode 1'
base = ut.get_argval('--base', type_=str, default='a')
annots = ut.chr_range(num_annots, base=base)
# The indexes of match CPDs will not change if another annotation is added
upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
if hack_score_only:
upper_diag_idxs = upper_diag_idxs[-hack_score_only:]
if num_names is None:
num_names = num_annots
# +--- Define CPD Templates and Instantiation ---
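    # Resulting network structure: one name node per annotation, one pairwise
    # match node per (upper-diagonal) pair of name nodes, and one score node
    # observing each match node; e.g. for 3 annots the names Na, Nb, Nc feed
    # match nodes Sab, Sac, Sbc, each of which feeds its own score node.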
cpd_list = []
# Name Factor
name_cpd_t = pgm_ext.TemplateCPD(
NAME_TTYPE, ('n', num_names),
special_basis_pool=special_names)
name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
#name_cpds = [name_cpd_t.new_cpd(parents=aid, constrain_state=count)
# for count, aid in enumerate(annots, start=1)]
cpd_list.extend(name_cpds)
# Match Factor
def match_pmf(match_type, n1, n2):
return {
True: {'same': 1.0, 'diff': 0.0},
False: {'same': 0.0, 'diff': 1.0},
}[n1 == n2][match_type]
match_states = ['diff', 'same']
match_cpd_t = pgm_ext.TemplateCPD(
MATCH_TTYPE, match_states,
evidence_ttypes=[name_cpd_t, name_cpd_t], pmf_func=match_pmf)
#match_cpd_t.varpref = 'S'
namepair_cpds = ut.unflat_take(name_cpds, upper_diag_idxs)
match_cpds = [match_cpd_t.new_cpd(parents=cpds)
for cpds in namepair_cpds]
cpd_list.extend(match_cpds)
# Score Factor
score_states = list(range(num_scores))
if score_basis is not None:
score_states = ['%.2f' % (s,) for s in score_basis]
if p_score_given_same is None:
tmp = np.arange(num_scores + 1)[1:]
tmp = np.cumsum(tmp)
tmp = (tmp / tmp.sum())
p_score_given_same = tmp
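    # e.g. num_scores=2 yields p_score_given_same = [0.25, 0.75]; score_pmf
    # below reverses this for the 'diff' case, so P(high | same) = 0.75 while
    # P(high | diff) = 0.25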
def score_pmf(score_type, match_type):
if isinstance(score_type, six.string_types):
score_type = score_states.index(score_type)
if match_type == 'same':
return p_score_given_same[score_type]
else:
return p_score_given_same[-(score_type + 1)]
score_cpd_t = pgm_ext.TemplateCPD(
SCORE_TTYPE, score_states,
evidence_ttypes=[match_cpd_t], pmf_func=score_pmf)
#match_cpd_t.varpref = 'P'
score_cpds = [score_cpd_t.new_cpd(parents=cpds)
for cpds in zip(match_cpds)]
cpd_list.extend(score_cpds)
with_humans = False
if with_humans:
human_states = ['diff', 'same']
human_cpd_t = pgm_ext.TemplateCPD(
'human', human_states,
evidence_ttypes=[match_cpd_t], pmf_func=[[.9, .1], [.1, .9]])
human_cpds = [human_cpd_t.new_cpd(parents=cpds)
for cpds in zip(match_cpds)]
cpd_list.extend(human_cpds)
    with_rank = False  # Rank depends on dependent scores
if with_rank:
rank_states = ['0', '1', '2', '3']
rank_cpd_t = pgm_ext.TemplateCPD(
'rank', rank_states,
evidence_ttypes=[match_cpd_t], pmf_func=None)
rank_cpds = [rank_cpd_t.new_cpd(parents=cpds)
for cpds in zip(match_cpds)]
cpd_list.extend(rank_cpds)
# L___ End CPD Definitions ___
print('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'),))
# Make Model
model = pgm_ext.define_model(cpd_list)
model.num_names = num_names
if verbose:
model.print_templates(ignore_ttypes=[MATCH_TTYPE])
return model
def update_model_evidence(model, name_evidence, score_evidence, other_evidence):
r"""
CommandLine:
python -m ibeis.algo.hots.bayes --exec-update_model_evidence
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.bayes import * # NOQA
>>> verbose = True
>>> other_evidence = {}
>>> name_evidence = [0, 0, 1, 1, None]
>>> score_evidence = ['high', 'low', 'low', 'low', 'low', 'high']
>>> model = make_name_model(num_annots=5, num_names=3, verbose=True,
>>> mode=1)
>>> update_model_evidence(model, name_evidence, score_evidence,
>>> other_evidence)
"""
name_cpds = model.ttype2_cpds[NAME_TTYPE]
score_cpds = model.ttype2_cpds[SCORE_TTYPE]
evidence = {}
evidence.update(other_evidence)
soft_evidence = {}
def apply_hard_soft_evidence(cpd_list, evidence_list):
for cpd, ev in zip(cpd_list, evidence_list):
if isinstance(ev, int):
# hard internal evidence
evidence[cpd.variable] = ev
if isinstance(ev, six.string_types):
# hard external evidence
evidence[cpd.variable] = cpd._internal_varindex(
cpd.variable, ev)
if isinstance(ev, dict):
# soft external evidence
# HACK THAT MODIFIES CPD IN PLACE
def rectify_evidence_val(_v, card=cpd.variable_card):
# rectify hacky string structures
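                    # '+eps' maps to a value just above uniform (1 / card),
                    # e.g. card=3 gives (1 + 1/18) / (3 + 1/18), roughly 0.345
                    # versus 1/3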
tmp = (1 / (2 * card ** 2))
return (1 + tmp) / (card + tmp) if _v == '+eps' else _v
ev_ = ut.map_dict_vals(rectify_evidence_val, ev)
fill = (1.0 - sum(ev_.values())) / (cpd.variable_card - len(ev_))
# HACK fix for float problems
if len(ev_) == cpd.variable_card - 1:
fill = 0
assert fill > -1E7, 'fill=%r' % (fill,)
row_labels = list(ut.iprod(*cpd.statenames))
for i, lbl in enumerate(row_labels):
if lbl in ev_:
# external case1
cpd.values[i] = ev_[lbl]
elif len(lbl) == 1 and lbl[0] in ev_:
# external case2
cpd.values[i] = ev_[lbl[0]]
elif i in ev_:
# internal case
cpd.values[i] = ev_[i]
else:
cpd.values[i] = fill
cpd.normalize()
soft_evidence[cpd.variable] = True
apply_hard_soft_evidence(name_cpds, name_evidence)
apply_hard_soft_evidence(score_cpds, score_evidence)
return model, evidence, soft_evidence
def reduce_marginalize(phi, query_variables=None,
evidence={}, inplace=False):
"""
Hack for reduction followed by marginalization
Example:
>>> reduced_joint = joint.observe(
>>> query_variables, evidence, inplace=False)
>>> new_rows = reduced_joint._row_labels()
>>> new_vals = reduced_joint.values.ravel()
>>> map_vals = new_rows[new_vals.argmax()]
>>> map_assign = dict(zip(reduced_joint.variables, map_vals))
"""
reduced_joint = phi if inplace else phi.copy()
if query_variables is None:
query_variables = reduced_joint.variables
reduced_joint.reduce(evidence)
reduced_joint.normalize()
# Marginalize over non-query, non-evidence
irrelevant_vars = (
set(reduced_joint.variables) -
(set(evidence.keys()) | set(query_variables))
)
reduced_joint.marginalize(irrelevant_vars)
reduced_joint.normalize()
if not inplace:
return reduced_joint
def make_temp_state(state):
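    # Map raw state indexes to canonical negative placeholders in first-seen
    # order, e.g. [2, 0, 2, 1] -> [-1, -2, -1, -3]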
mapping = {}
for state_idx in state:
if state_idx not in mapping:
mapping[state_idx] = -(len(mapping) + 1)
temp_state = [mapping[state_idx] for state_idx in state]
return temp_state
def collapse_labels(model, evidence, reduced_variables, reduced_row_idxs,
reduced_values):
import vtool_ibeis as vt
#assert np.all(reduced_joint.values.ravel() == reduced_joint.values.flatten())
reduced_ttypes = [model.var2_cpd[var].ttype for var in reduced_variables]
evidence_vars = list(evidence.keys())
evidence_state_idxs = ut.dict_take(evidence, evidence_vars)
evidence_ttypes = [model.var2_cpd[var].ttype for var in evidence_vars]
ttype2_ev_indices = dict(zip(*ut.group_indices(evidence_ttypes)))
ttype2_re_indices = dict(zip(*ut.group_indices(reduced_ttypes)))
# ttype2_ev_indices = ut.group_items(range(len(evidence_vars)), evidence_ttypes)
# ttype2_re_indices = ut.group_items(range(len(reduced_variables)), reduced_ttypes)
# Allow specific types of labels to change
# everything is the same, only the names have changed.
# TODO: allow for multiple different label_ttypes
# for label_ttype in label_ttypes
if NAME_TTYPE not in model.ttype2_template:
return reduced_row_idxs, reduced_values
label_ttypes = [NAME_TTYPE]
for label_ttype in label_ttypes:
ev_colxs = ttype2_ev_indices[label_ttype]
re_colxs = ttype2_re_indices[label_ttype]
ev_state_idxs = ut.take(evidence_state_idxs, ev_colxs)
        ev_state_idxs_tile = np.tile(ev_state_idxs, (len(reduced_values), 1)).astype(int)
num_ev_ = len(ev_colxs)
aug_colxs = list(range(num_ev_)) + (np.array(re_colxs) + num_ev_).tolist()
aug_state_idxs = np.hstack([ev_state_idxs_tile, reduced_row_idxs])
# Relabel rows based on the knowledge that
# everything is the same, only the names have changed.
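        # e.g. the rows (n0=0, n1=1) and (n0=1, n1=0) describe the same
        # partition ("a and b have different names"), so after relabeling they
        # collapse into one row and their probabilities are summed below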
num_cols = len(aug_state_idxs.T)
mask = vt.index_to_boolmask(aug_colxs, num_cols)
other_colxs, = np.where(~mask)
relbl_states = aug_state_idxs.compress(mask, axis=1)
other_states = aug_state_idxs.compress(~mask, axis=1)
tmp_relbl_states = np.array(list(map(make_temp_state, relbl_states)))
max_tmp_state = -1
min_tmp_state = tmp_relbl_states.min()
# rebuild original state structure with temp state idxs
tmp_state_cols = [None] * num_cols
for count, colx in enumerate(aug_colxs):
tmp_state_cols[colx] = tmp_relbl_states[:, count:count + 1]
for count, colx in enumerate(other_colxs):
tmp_state_cols[colx] = other_states[:, count:count + 1]
tmp_state_idxs = np.hstack(tmp_state_cols)
data_ids = np.array(
vt.compute_unique_data_ids_(list(map(tuple, tmp_state_idxs))))
unique_ids, groupxs = vt.group_indices(data_ids)
print('Collapsed %r states into %r states' % (
len(data_ids), len(unique_ids),))
# Sum the values in the cpd to marginalize the duplicate probs
new_values = np.array([
g.sum() for g in vt.apply_grouping(reduced_values, groupxs)
])
# Take only the unique rows under this induced labeling
unique_tmp_groupxs = np.array(ut.get_list_column(groupxs, 0))
new_aug_state_idxs = tmp_state_idxs.take(unique_tmp_groupxs, axis=0)
tmp_idx_set = set((-np.arange(-max_tmp_state,
(-min_tmp_state) + 1)).tolist())
true_idx_set = set(range(len(model.ttype2_template[label_ttype].basis)))
# Relabel the rows one more time to agree with initial constraints
for colx, true_idx in enumerate(ev_state_idxs):
tmp_idx = np.unique(new_aug_state_idxs.T[colx])
assert len(tmp_idx) == 1
tmp_idx_set -= {tmp_idx[0]}
true_idx_set -= {true_idx}
new_aug_state_idxs[new_aug_state_idxs == tmp_idx] = true_idx
# Relabel the remaining idxs
remain_tmp_idxs = sorted(list(tmp_idx_set))[::-1]
remain_true_idxs = sorted(list(true_idx_set))
for tmp_idx, true_idx in zip(remain_tmp_idxs, remain_true_idxs):
new_aug_state_idxs[new_aug_state_idxs == tmp_idx] = true_idx
# Remove evidence based augmented labels
new_state_idxs = new_aug_state_idxs.T[num_ev_:].T
return new_state_idxs, new_values
def collapse_factor_labels(model, reduced_joint, evidence):
reduced_variables = reduced_joint.variables
reduced_row_idxs = np.array(reduced_joint._row_labels(asindex=True))
reduced_values = reduced_joint.values.ravel()
new_state_idxs, new_values = collapse_labels(
model, evidence, reduced_variables, reduced_row_idxs, reduced_values)
if isinstance(reduced_joint, pgm_ext.ApproximateFactor):
new_reduced_joint = pgm_ext.ApproximateFactor(
new_state_idxs, new_values, reduced_variables,
statename_dict=reduced_joint.statename_dict)
else:
# hack into a new joint factor
# (that is the same size as the reduced_joint)
new_reduced_joint = reduced_joint.copy()
assert new_reduced_joint.values is not reduced_joint.values, (
'copy did not work')
new_reduced_joint.values[:] = 0
flat_idxs = np.ravel_multi_index(new_state_idxs.T,
new_reduced_joint.values.shape)
old_values = new_reduced_joint.values.ravel()
old_values[flat_idxs] = new_values
new_reduced_joint.values = old_values.reshape(
reduced_joint.cardinality)
# print(new_reduced_joint._str(maxrows=4, sort=-1))
# return new_reduced_joint, new_state_idxs, new_values
return new_reduced_joint
def report_partitioning_statistics(new_reduced_joint):
# compute partitioning statistics
import vtool_ibeis as vt
vals, idxs = vt.group_indices(new_reduced_joint.values.ravel())
#groupsize = list(map(len, idxs))
#groupassigns = ut.unflat_vecmap(new_reduced_joint.assignment, idxs)
all_states = new_reduced_joint._row_labels(asindex=True)
clusterstats = [tuple(sorted(list(ut.dict_hist(a).values())))
for a in all_states]
grouped_vals = ut.group_items(new_reduced_joint.values.ravel(),
clusterstats)
#probs_assigned_to_clustertype = [(
# sorted(np.unique(np.array(b).round(decimals=5)).tolist())[::-1], a)
# for a, b in grouped_vals.items()]
probs_assigned_to_clustertype = [(
ut.dict_hist(np.array(b).round(decimals=5)), a)
for a, b in grouped_vals.items()]
sortx = ut.argsort([max(c[0].keys())
for c in probs_assigned_to_clustertype])
probs_assigned_to_clustertype = ut.take(probs_assigned_to_clustertype, sortx)
    # This is a list of 2-tuples: the first item maps each unique probability
    # assigned to a cluster type to the number of times it was assigned; the
    # second item is the cluster type itself. Every number in a cluster type
    # is how many annotations were assigned to a specific label, so its length
    # is the total number of labels. For all-low scores you will see
    # [[{somenum: 1}, {0: 800}], [1, 1, 1, ... 1]], indicating that the
    # assignment of everyone to a different label happened once with
    # probability somenum and 800 times with probability 0.
#print(sorted([(b, a) for a, b in ut.map_dict_vals(sum, x)]).items())
#z = sorted([(b, a) for a, b in ut.map_dict_vals(sum, grouped_vals).items()])
print(ut.repr2(probs_assigned_to_clustertype, nl=2, precision=2, sorted_=True))
#group_numperlbl = [
# [sorted(list(ut.dict_hist(ut.get_list_column(a, 1)).values())) for a in assigns]
# for assigns in groupassigns]
def _test_compute_reduced_joint(model, query_vars, evidence, method):
import pgmpy
operation = 'maximize'
variables = query_vars
infr_ve = pgmpy.inference.VariableElimination(model)
joint_ve = infr_ve.compute_joint(variables, operation, evidence)
joint_ve.normalize()
joint_ve.reorder()
infr_bp = pgmpy.inference.BeliefPropagation(model)
joint_bp = infr_bp.compute_joint(variables, operation, evidence)
joint_bp.normalize()
joint_bp.reorder()
assert np.allclose(joint_ve.values, joint_bp.values)
print('VE and BP are the same')
joint_bf = model.joint_distribution()
reduce_marginalize(joint_bf, query_vars, evidence, inplace=True)
assert np.allclose(joint_bf.values, joint_bp.values)
print('BF and BP are the same')
def compute_reduced_joint(model, query_vars, evidence, method,
operation='maximize'):
import pgmpy
if method == 'approx':
# TODO: incorporate operation?
query_states = model.get_number_of_states(query_vars)
print('model.number_of_states = %r' % (
model.get_number_of_states(),))
print('query_states = %r' % (query_states,))
        # Try to approximately sample the MAP inference
infr = pgmpy.inference.Sampling.BayesianModelSampling(model)
# The markov blanket of a name node in our network
# can be quite large. It includes all other names.
# infr = pgmpy.inference.Sampling.GibbsSampling(model)
# import utool
# utool.embed()
#infr = pgmpy.inference.Sampling.GibbsSampling()
#infr._get_kernel_from_bayesian_model(model)
evidence_ = [pgmpy.inference.Sampling.State(*item)
for item in evidence.items()]
        # TODO: apply Hoeffding and Chernoff bounds
delta = .1 # desired probability of error
eps = .2 # desired error bound
u = 1 / (2 ** len(evidence)) # upper bound on cpd entries of evidence
k = len(evidence)
gamma = (4 * (1 + eps) / (eps ** 2)) * np.log(2 / delta)
thresh = gamma * (u ** k)
# We are observing the leaves of this network, which means
# we are effectively sampling from the prior distribution
# when using forward sampling.
Py = 1 / query_states
Py_hueristic = 1 / (4 ** len(query_vars))
M_hoffding = (np.log(2 / delta) / (2 * eps ** 2))
M_chernoff = 3 * (np.log(2 / delta) / (Py * eps ** 2))
M_chernoff_hueristic = 3 * (np.log(2 / delta) / (Py_hueristic * eps ** 2))
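        # With the defaults above (delta=.1, eps=.2): M_hoffding = ln(20) / 0.08,
        # roughly 37 samples, while M_chernoff is roughly 225 / Py, so the
        # Chernoff bound dominates once the query state space gets large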
hueristic_size = 2 ** (len(query_vars) + 2)
size = min(100000, max(hueristic_size, 128))
print('\n-----')
print('u = %r' % (u,))
print('thresh = %r' % (thresh,))
print('k = %r' % (k,))
print('gamma = %r' % (gamma,))
print('M_chernoff_hueristic = %r' % (M_chernoff_hueristic,))
print('hueristic_size = %r' % (hueristic_size,))
print('M_hoffding = %r' % (M_hoffding,))
print('M_chernoff = %r' % (M_chernoff,))
print('size = %r' % (size,))
#np.log(2 / .1) / (2 * (.2 ** 2))
sampled = infr.likelihood_weighted_sample(evidence=evidence_,
size=size)
reduced_joint = pgm_ext.ApproximateFactor.from_sampled(sampled,
query_vars,
statename_dict=model.statename_dict)
#self = reduced_joint # NOQA
#arr = self.state_idxs # NOQA
#import utool
#utool.embed()
num_raw_states = len(reduced_joint.state_idxs)
reduced_joint.consolidate()
num_unique_states = len(reduced_joint.state_idxs)
print('[pgm] %r / %r initially sampled states are unique' % (
num_unique_states, num_raw_states,))
reduced_joint.normalize()
reduced_joint.reorder()
elif method == 'varelim':
infr = pgmpy.inference.VariableElimination(model)
reduced_joint = infr.compute_joint(query_vars, operation, evidence)
reduced_joint.normalize()
reduced_joint.reorder()
elif method in ['bp', 'beliefprop']:
# Dont brute force anymore
infr = pgmpy.inference.BeliefPropagation(model)
reduced_joint = infr.compute_joint(query_vars, operation, evidence)
reduced_joint.normalize()
reduced_joint.reorder()
elif method in ['bf', 'brute', 'bruteforce']:
# TODO: incorporate operation?
full_joint = model.joint_distribution()
reduced_joint = reduce_marginalize(full_joint, query_vars,
evidence, inplace=False)
del full_joint
else:
raise NotImplementedError('method=%r' % (method,))
return reduced_joint
def cluster_query(model, query_vars=None, evidence=None, soft_evidence=None,
method=None, operation='maximize'):
"""
CommandLine:
python -m ibeis.algo.hots.bayes --exec-cluster_query --show
GridParams:
>>> param_grid = dict(
>>> #method=['approx', 'bf', 'bp'],
>>> method=['approx', 'bp'],
>>> )
>>> combos = ut.all_dict_combinations(param_grid)
>>> index = 0
>>> keys = 'method'.split(', ')
>>> method, = ut.dict_take(combos[index], keys)
GridSetup:
>>> from ibeis.algo.hots.bayes import * # NOQA
>>> verbose = True
>>> other_evidence = {}
>>> name_evidence = [1, None, None, 0]
>>> score_evidence = [2, 0, 2]
>>> special_names = ['fred', 'sue', 'tom', 'paul']
>>> model = make_name_model(
>>> num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
>>> special_names=special_names)
>>> method = None
>>> model, evidence, soft_evidence = update_model_evidence(
>>> model, name_evidence, score_evidence, other_evidence)
>>> evidence = model._ensure_internal_evidence(evidence)
>>> query_vars = ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')
GridExample:
>>> # DISABLE_DOCTEST
>>> query_results = cluster_query(model, query_vars, evidence,
>>> method=method)
>>> print(ut.repr2(query_results['top_assignments'], nl=1))
>>> ut.quit_if_noshow()
>>> from ibeis.algo.hots import pgm_viz
>>> pgm_viz.show_model(model, evidence=evidence, **query_results)
>>> ut.show_if_requested()
"""
evidence = model._ensure_internal_evidence(evidence)
if query_vars is None:
query_vars = model.nodes()
orig_query_vars = query_vars # NOQA
query_vars = ut.setdiff(query_vars, list(evidence.keys()))
if method is None:
method = ut.get_argval('--method', type_=str, default='bp')
reduced_joint = compute_reduced_joint(model, query_vars, evidence,
method, operation)
new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)
if False:
report_partitioning_statistics(new_reduced_joint)
# FIXME: are these max marginals?
max_marginals = {}
for i, var in enumerate(query_vars):
one_out = query_vars[:i] + query_vars[i + 1:]
max_marginals[var] = new_reduced_joint.marginalize(one_out,
inplace=False)
# max_marginals[var] = joint2.maximize(one_out, inplace=False)
factor_list = max_marginals.values()
# Now find the most likely state
reduced_variables = new_reduced_joint.variables
new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
new_values = new_reduced_joint.values.ravel()
sortx = new_values.argsort()[::-1]
sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
sort_new_values = new_values.take(sortx)
sort_new_states = list(zip(*[
ut.dict_take(model.statename_dict[var], idx)
for var, idx in
zip(reduced_variables, sort_new_state_idxs.T)]))
# Better map assignment based on knowledge of labels
map_assign = dict(zip(reduced_variables, sort_new_states[0]))
sort_reduced_rowstr_lbls = [
ut.repr2(dict(zip(reduced_variables, lbls)), explicit=True,
nobraces=True,
strvals=True)
for lbls in sort_new_states
]
top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
if len(sort_new_values) > 3:
top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
query_results = {
'factor_list': factor_list,
'top_assignments': top_assignments,
'map_assign': map_assign,
'method': method,
}
print('query_results = %s' % (ut.repr3(query_results, nl=2),))
return query_results
def draw_tree_model(model, **kwargs):
import plottool_ibeis as pt
import networkx as netx
if not ut.get_argval('--hackjunc'):
fnum = pt.ensure_fnum(None)
fig = pt.figure(fnum=fnum, doclf=True) # NOQA
ax = pt.gca()
#name_nodes = sorted(ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable'))
netx_graph = model.to_markov_model()
#pos = netx.pygraphviz_layout(netx_graph)
#pos = netx.graphviz_layout(netx_graph)
#pos = get_hacked_pos(netx_graph, name_nodes, prog='neato')
pos = netx.nx_pydot.pydot_layout(netx_graph)
node_color = [pt.WHITE] * len(pos)
drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
node_size=1100)
netx.draw(netx_graph, **drawkw)
if kwargs.get('show_title', True):
pt.set_figtitle('Markov Model')
if not ut.get_argval('--hackmarkov'):
fnum = pt.ensure_fnum(None)
fig = pt.figure(fnum=fnum, doclf=True) # NOQA
ax = pt.gca()
netx_graph = model.to_junction_tree()
# prettify nodes
def fixtupkeys(dict_):
return {
', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v)
for k, v in dict_.items()
}
# FIXME
n = fixtupkeys(netx_graph.node)
e = fixtupkeys(netx_graph.edge)
a = fixtupkeys(netx_graph.adj)
netx_graph.nodes.update(n)
netx_graph.edges.update(e)
netx_graph.adj.update(a)
#netx_graph = model.to_markov_model()
#pos = netx.pygraphviz_layout(netx_graph)
#pos = netx.graphviz_layout(netx_graph)
pos = netx.nx_pydot.pydot_layout(netx_graph)
node_color = [pt.WHITE] * len(pos)
drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
node_size=2000)
netx.draw(netx_graph, **drawkw)
if kwargs.get('show_title', True):
pt.set_figtitle('Junction/Clique Tree / Cluster Graph')
def get_hacked_pos(netx_graph, name_nodes=None, prog='dot'):
import pygraphviz
import networkx as netx
# Add "invisible" edges to induce an ordering
# Hack for layout (ordering of top level nodes)
netx_graph2 = netx_graph.copy()
if getattr(netx_graph, 'ttype2_cpds', None) is not None:
grouped_nodes = []
for ttype in netx_graph.ttype2_cpds.keys():
ttype_cpds = netx_graph.ttype2_cpds[ttype]
# use defined ordering
ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
# ttype_nodes = sorted(ttype_nodes)
invis_edges = list(ut.itertwo(ttype_nodes))
netx_graph2.add_edges_from(invis_edges)
grouped_nodes.append(ttype_nodes)
A = netx.to_agraph(netx_graph2)
for nodes in grouped_nodes:
A.add_subgraph(nodes, rank='same')
else:
A = netx.to_agraph(netx_graph2)
#if name_nodes is not None:
# #netx.set_node_attributes(netx_graph, name='label', values={n: {'label': n} for n in all_nodes})
# invis_edges = list(ut.itertwo(name_nodes))
# netx_graph2.add_edges_from(invis_edges)
# A.add_subgraph(name_nodes, rank='same')
#else:
# A = netx.to_agraph(netx_graph2)
args = ''
G = netx_graph
A.layout(prog=prog, args=args)
#A.draw('example.png', prog='dot')
node_pos = {}
for n in G:
node_ = pygraphviz.Node(A, n)
try:
xx, yy = node_.attr["pos"].split(',')
node_pos[n] = (float(xx), float(yy))
        except Exception:
print("no position for node", n)
node_pos[n] = (0.0, 0.0)
return node_pos
def show_model(model, evidence={}, soft_evidence={}, **kwargs):
"""
References:
http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer
Ignore:
pkg-config --libs-only-L libcgraph
sudo apt-get install libgraphviz-dev -y
sudo apt-get install libgraphviz4 -y
# sudo apt-get install pkg-config
sudo apt-get install libgraphviz-dev
# pip install git+git://github.com/pygraphviz/pygraphviz.git
pip install pygraphviz
python -c "import pygraphviz; print(pygraphviz.__file__)"
sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
python3 -c "import pygraphviz; print(pygraphviz.__file__)"
CommandLine:
python -m ibeis.algo.hots.bayes --exec-show_model --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.bayes import * # NOQA
>>> model = '?'
>>> evidence = {}
>>> soft_evidence = {}
>>> result = show_model(model, evidence, soft_evidence)
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'):
draw_tree_model(model, **kwargs)
return
import plottool_ibeis as pt
import networkx as netx
fnum = pt.ensure_fnum(None)
netx_graph = (model)
#netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"'
#netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'
pos_dict = get_hacked_pos(netx_graph)
#pos_dict = netx.nx_agraph.pygraphviz_layout(netx_graph)
#pos = netx.nx_agraph.nx_pydot.pydot_layout(netx_graph, prog='dot')
#pos_dict = netx.nx_agraph.graphviz_layout(netx_graph)
textprops = {
'family': 'monospace',
'horizontalalignment': 'left',
#'horizontalalignment': 'center',
#'size': 12,
'size': 8,
}
netx_nodes = model.nodes(data=True)
node_key_list = ut.get_list_column(netx_nodes, 0)
pos_list = ut.dict_take(pos_dict, node_key_list)
var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])}
prior_text = None
post_text = None
evidence_tas = []
post_tas = []
prior_tas = []
node_color = []
has_inferred = evidence or var2_post
if has_inferred:
ignore_prior_with_ttype = [SCORE_TTYPE, MATCH_TTYPE]
show_prior = False
else:
ignore_prior_with_ttype = []
#show_prior = True
show_prior = False
dpy = 5
dbx, dby = (20, 20)
takw1 = {'bbox_align': (.5, 0), 'pos_offset': [0, dpy], 'bbox_offset': [dbx, dby]}
takw2 = {'bbox_align': (.5, 1), 'pos_offset': [0, -dpy], 'bbox_offset': [-dbx, -dby]}
name_colors = pt.distinct_colors(max(model.num_names, 10))
name_colors = name_colors[:model.num_names]
#cmap_ = 'hot' #mx = 0.65 #mn = 0.15
cmap_, mn, mx = 'plasma', 0.15, 1.0
_cmap = pt.plt.get_cmap(cmap_)
def cmap(x):
return _cmap((x * mx) + mn)
for node, pos in zip(netx_nodes, pos_list):
variable = node[0]
cpd = model.var2_cpd[variable]
prior_marg = (cpd if cpd.evidence is None else
cpd.marginalize(cpd.evidence, inplace=False))
show_evidence = variable in evidence
show_prior = cpd.ttype not in ignore_prior_with_ttype
show_post = variable in var2_post
show_prior |= cpd.ttype not in ignore_prior_with_ttype
post_marg = None
if show_post:
post_marg = var2_post[variable]
def get_name_color(phi):
order = phi.values.argsort()[::-1]
if len(order) < 2:
dist_next = phi.values[order[0]]
else:
dist_next = phi.values[order[0]] - phi.values[order[1]]
dist_total = (phi.values[order[0]])
confidence = (dist_total * dist_next) ** (2.5 / 4)
#print('confidence = %r' % (confidence,))
color = name_colors[order[0]]
color = pt.color_funcs.desaturate_rgb(color, 1 - confidence)
color = np.array(color)
return color
if variable in evidence:
if cpd.ttype == SCORE_TTYPE:
cmap_index = evidence[variable] / (cpd.variable_card - 1)
color = cmap(cmap_index)
color = pt.lighten_rgb(color, .4)
color = np.array(color)
node_color.append(color)
elif cpd.ttype == NAME_TTYPE:
color = name_colors[evidence[variable]]
color = np.array(color)
node_color.append(color)
else:
color = pt.FALSE_RED
node_color.append(color)
#elif variable in soft_evidence:
# color = pt.LIGHT_PINK
# show_prior = True
# color = get_name_color(prior_marg)
# node_color.append(color)
else:
if cpd.ttype == NAME_TTYPE and post_marg is not None:
color = get_name_color(post_marg)
node_color.append(color)
elif cpd.ttype == MATCH_TTYPE and post_marg is not None:
color = cmap(post_marg.values[1])
color = pt.lighten_rgb(color, .4)
color = np.array(color)
node_color.append(color)
else:
#color = pt.WHITE
color = pt.NEUTRAL
node_color.append(color)
if show_prior:
if variable in soft_evidence:
prior_color = pt.LIGHT_PINK
else:
prior_color = None
prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')
prior_tas.append(dict(text=prior_text, pos=pos, color=prior_color, **takw2))
if show_evidence:
_takw1 = takw1
if cpd.ttype == SCORE_TTYPE:
_takw1 = takw2
evidence_text = cpd.variable_statenames[evidence[variable]]
if isinstance(evidence_text, int):
evidence_text = '%d/%d' % (evidence_text + 1, cpd.variable_card)
evidence_tas.append(dict(text=evidence_text, pos=pos, color=color, **_takw1))
if show_post:
_takw1 = takw1
if cpd.ttype == MATCH_TTYPE:
_takw1 = takw2
post_text = pgm_ext.make_factor_text(post_marg, 'post')
post_tas.append(dict(text=post_text, pos=pos, color=None, **_takw1))
def trnps_(dict_list):
""" tranpose dict list """
list_dict = ut.ddict(list)
for dict_ in dict_list:
for key, val in dict_.items():
list_dict[key + '_list'].append(val)
return list_dict
takw1_ = trnps_(post_tas + evidence_tas)
takw2_ = trnps_(prior_tas)
# Draw graph
if has_inferred:
pnum1 = (3, 1, (slice(0, 2), 0))
else:
pnum1 = None
fig = pt.figure(fnum=fnum, pnum=pnum1, doclf=True) # NOQA
ax = pt.gca()
#print('node_color = %s' % (ut.repr3(node_color),))
drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1500,
node_color=node_color)
netx.draw(netx_graph, **drawkw)
hacks = []
if len(post_tas + evidence_tas):
hacks.append(pt.draw_text_annotations(textprops=textprops, **takw1_))
if prior_tas:
hacks.append(pt.draw_text_annotations(textprops=textprops, **takw2_))
xmin, ymin = np.array(pos_list).min(axis=0)
xmax, ymax = np.array(pos_list).max(axis=0)
num_annots = len(model.ttype2_cpds[NAME_TTYPE])
if num_annots > 4:
ax.set_xlim((xmin - 40, xmax + 40))
ax.set_ylim((ymin - 50, ymax + 50))
fig.set_size_inches(30, 7)
else:
ax.set_xlim((xmin - 42, xmax + 42))
ax.set_ylim((ymin - 50, ymax + 50))
fig.set_size_inches(23, 7)
fig = pt.gcf()
title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots,)
map_assign = kwargs.get('map_assign', None)
top_assignments = kwargs.get('top_assignments', None)
if top_assignments is not None:
map_assign, map_prob = top_assignments[0]
if map_assign is not None:
def word_insert(text):
return '' if len(text) == 0 else text + ' '
title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
if kwargs.get('show_title', True):
pt.set_figtitle(title, size=14)
for hack in hacks:
hack()
# Hack in colorbars
if has_inferred:
pt.colorbar(np.linspace(0, 1, len(name_colors)), name_colors, lbl=NAME_TTYPE,
ticklabels=model.ttype2_template[NAME_TTYPE].basis, ticklocation='left')
basis = model.ttype2_template[SCORE_TTYPE].basis
scalars = np.linspace(0, 1, len(basis))
scalars = np.linspace(0, 1, 100)
colors = pt.scores_to_color(scalars, cmap_=cmap_, reverse_cmap=False,
cmap_range=(mn, mx))
colors = [pt.lighten_rgb(c, .4) for c in colors]
if ut.list_type(basis) is int:
pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=np.array(basis) + 1)
else:
pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=basis)
#print('basis = %r' % (basis,))
# Draw probability hist
if has_inferred and top_assignments is not None:
bin_labels = ut.get_list_column(top_assignments, 0)
bin_vals = ut.get_list_column(top_assignments, 1)
# bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels]
pt.draw_histogram(bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))),
transpose=True,
use_darkbackground=False,
#xtick_rotation=-10,
ylabel='Prob', xlabel='assignment')
pt.set_title('Assignment probabilities')
#fpath = ('name_model_' + suff + '.png')
#pt.plt.savefig(fpath)
#return fpath
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.algo.hots.bayes
python -m ibeis.algo.hots.bayes --allexamples
"""
if ut.VERBOSE:
print('[hs] bayes')
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
import warnings
import numpy as np
from pandas import compat
from pandas._libs import reduction
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
is_sequence)
from pandas.util._decorators import cache_readonly
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
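# A minimal usage sketch (hypothetical variable names, assuming an ordinary
# DataFrame ``df``):
#
#     op = frame_apply(df, func=np.sqrt, axis=0)
#     result = op.get_result()
#
# which is essentially the path DataFrame.apply takes internally.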
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, 'reduce', 'broadcast', 'expand']:
raise ValueError("invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}")
if broadcast is not None:
warnings.warn("The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if broadcast:
result_type = 'broadcast'
if reduce is not None:
warnings.warn("The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if reduce:
if result_type is not None:
raise ValueError(
"cannot pass both reduce=True and result_type")
result_type = 'reduce'
self.result_type = result_type
# curry if needed
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, compat.string_types):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = compat.signature(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.f(self.values)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result,
index=self.index,
columns=self.columns)
else:
return self.obj._constructor_sliced(result,
index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
        # length along this axis that any 1-d result must match
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(result_values,
index=target.index,
columns=target.columns)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (self.result_type in ['reduce', None] and
not self.dtypes.apply(is_extension_type).any()):
# Create a dummy Series from an empty array
from pandas import Series
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
result = reduction.reduce(values, self.f,
axis=self.axis,
dummy=dummy,
labels=labels)
return self.obj._constructor_sliced(result, index=labels)
except Exception:
pass
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def get_result(self):
# dispatch to agg
if isinstance(self.f, (list, dict)):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
return super(FrameRowApply, self).get_result()
def apply_broadcast(self):
return super(FrameRowApply, self).apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1)
for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
try:
result.index = self.res_columns
except ValueError:
pass
try:
result.columns = self.res_index
except ValueError:
pass
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super(FrameColumnApply, self).apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values,
self.index)))
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == 'expand':
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
|
|
#!/usr/bin/python
__appname__ = 'libgdx_library_updater'
__version__ = "0.91"
__author__ = "Jon Renner <[email protected]>"
__url__ = "http://github.com/jrenner/libgdx-updater"
__licence__ = "MIT"
import os, time, sys, urllib2, re, datetime, tempfile, zipfile, argparse
# error handling functions and utils
def fatal_error(msg):
print "ERROR: %s" % msg
sys.exit(1)
def warning_error(msg):
print "WARNING: %s" % msg
if not FORCE:
answer = confirm("abort? (Y/n): ")
if answer in YES:
fatal_error("USER QUIT")
def confirm(msg):
answer = raw_input(msg)
return answer.lower()
def human_time(t):
    minutes = int(t // 60)
seconds = t % 60
return "%.0fm %.1fs" % (minutes, seconds)
# constants
YES = ['y', 'ye', 'yes', '']
# for finding the time of the latest nightly build from the web page html
DATE_RE = r"[0-9]{1,2}-[A-Za-z]{3,4}-[0-9]{4}\s[0-9]+:[0-9]+"
REMOTE_DATE_FORMAT = "%d-%b-%Y %H:%M"
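# e.g. a nightlies index row such as
#   "libgdx-nightly-latest.zip    12-Mar-2014 05:21"   (hypothetical values)
# matches DATE_RE and parses with REMOTE_DATE_FORMAT to datetime(2014, 3, 12, 5, 21)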
SUPPORTED_PLATFORMS = ['android', 'android_x86', 'desktop', 'gwt', 'ios', 'box2d', 'bullet']
CORE_LIBS = [
"gdx.jar",
"gdx-sources.jar",
]
DESKTOP_LIBS = [
"gdx-backend-lwjgl.jar",
"gdx-backend-lwjgl-natives.jar",
"gdx-natives.jar",
]
ANDROID_LIBS = [
"gdx-backend-android.jar",
"armeabi/libgdx.so",
"armeabi-v7a/libgdx.so",
]
ANDROID_X86_LIBS = [
"x86/libgdx.so",
]
GWT_LIBS = ["gdx-backend-gwt.jar"]
ROBOVM_LIBS = [
"gdx-backend-robovm.jar",
"ios/libgdx.a",
"ios/libObjectAL.a"
]
BOX2D = [
"gdx-box2d.jar",
"gdx-box2d-natives.jar",
"gdx-box2d-gwt.jar",
"armeabi/libgdx-box2d.so",
"armeabi-v7a/libgdx-box2d.so",
"x86/libgdx-box2d.so",
"gdx-box2d-sources.jar",
"gdx-box2d-gwt-sources.jar",
"ios/libgdx-box2d.a"
]
BULLET = [
"gdx-bullet.jar",
"gdx-bullet-natives.jar",
"armeabi/libgdx-bullet.so",
"armeabi-v7a/libgdx-bullet.so",
"ios/libdgx-bullet.a",
"x86/libgdx-bullet.so",
"sources/gdx-bullet-sources.jar",
]
# parse arguments
EPILOGUE_TEXT = "%s\n%s" % (__author__, __url__) + "\nUSE AT YOUR OWN RISK!"
parser = argparse.ArgumentParser(description='LibGDX Library Updater %s' % __version__, epilog=EPILOGUE_TEXT)
parser.add_argument('-d', '--directory', help='set the libgdx project/workspace directory', default=os.getcwd())
parser.add_argument('-i', '--interactive', action='store_true', help='ask for confirmation for every file', default=False)
parser.add_argument('-f', '--force-update', action='store_true', help='no confirmations, just update without checking nightly\'s datetime', default=False)
parser.add_argument('-a', '--archive', help='specify libgdx zip file to use for update', default=None)
args = parser.parse_args()
PROJECT_DIR = args.directory
INTERACTIVE = args.interactive
FORCE = args.force_update
ARCHIVE = args.archive
# mutually exclusive
if FORCE:
INTERACTIVE = False
# check the time of the latest archive on the nightlies server
def get_remote_archive_mtime():
index_page = urllib2.urlopen("http://libgdx.badlogicgames.com/nightlies/")
contents = index_page.read()
print "-- OK --"
# regex for filename
regex = r"libgdx-nightly-latest\.zip"
    # add regex for anything followed by the nightly html time format
regex += r".*%s" % DATE_RE
try:
result = re.findall(regex, contents)[0]
except IndexError as e:
print "REGEX ERROR: failed to find '%s' in:\n%s" % (regex, contents)
fatal_error("regex failure to match")
try:
mtime = re.findall(DATE_RE, result)[0]
except IndexError as e:
print "REGEX ERROR: failed to find datetime in: %s" % result
fatal_error("regex failure to match")
dtime = datetime.datetime.strptime(mtime, REMOTE_DATE_FORMAT)
return dtime
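# Hedged sketch (not part of the original script): shows how DATE_RE and
# REMOTE_DATE_FORMAT fit together; the directory-listing line is made up.
def _parse_mtime_example():
    sample = "libgdx-nightly-latest.zip      12-Mar-2014 04:05   150M"
    mtime = re.findall(DATE_RE, sample)[0]  # -> "12-Mar-2014 04:05"
    return datetime.datetime.strptime(mtime, REMOTE_DATE_FORMAT)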
# downloads and returns a temporary file containing the latest nightly archive
def download_libgdx_zip():
libgdx = tempfile.TemporaryFile()
url = "http://libgdx.badlogicgames.com/nightlies/libgdx-nightly-latest.zip"
# testing url - don't hammer badlogic server, host the file on localhost instead
# url = "http://localhost/libgdx-nightly-latest.zip"
resp = urllib2.urlopen(url)
print "downloading file: %s" % url
total_size = resp.info().getheader('Content-Length').strip()
total_size = int(total_size)
# base 10 SI units - following Ubuntu policy because it makes sense - https://wiki.ubuntu.com/UnitsPolicy
total_size_megabytes = total_size / 1000000.0
bytes_read = 0
chunk_size = 10000 # 10kB per chunk
while True:
chunk = resp.read(chunk_size)
libgdx.write(chunk)
bytes_read += len(chunk)
bytes_read_megabytes = bytes_read / 1000000.0
percent = (bytes_read / float(total_size)) * 100
sys.stdout.write("\rprogress: {:>8}{:.2f} / {:.2f} MB ({:.0f}% complete)".format(
"", bytes_read_megabytes, total_size_megabytes, percent))
sys.stdout.flush()
if bytes_read >= total_size:
print "\nfinished download"
break
return libgdx
def update_files(libs, locations, archive):
for lib in libs:
# check to see if the lib exists in the project
if locations[lib] is None:
continue
# it's time for a dirty hack - shame on me
if lib == "gdx-sources.jar":
archive_name = "sources/gdx-sources.jar"
elif 'box2d' in lib:
archive_name = "extensions/gdx-box2d/" + lib
elif 'bullet' in lib:
archive_name = "extensions/gdx-bullet/" + lib
else:
archive_name = lib
# end dirty hack
if archive_name in archive.namelist():
if INTERACTIVE:
answer = confirm("overwrite %s? (Y/n): " % lib)
if answer not in YES:
print "skipped: %s" % lib
continue
with archive.open(archive_name, "r") as fin:
filename = os.path.basename(lib)
final_path = os.path.join(locations[lib], filename)
                with open(final_path, "wb") as fout:
fout.write(fin.read())
print "extracted to %s" % final_path
else:
warning_error("Couldn't find %s in .zip archive" % lib)
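# Hedged note on the archive-name mapping performed above (paths illustrative):
#   "gdx-sources.jar"       -> "sources/gdx-sources.jar"
#   "gdx-box2d-natives.jar" -> "extensions/gdx-box2d/gdx-box2d-natives.jar"
#   "gdx-bullet.jar"        -> "extensions/gdx-bullet/gdx-bullet.jar"
#   anything else           -> unchanged (e.g. "armeabi/libgdx.so")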
def run_core(locations, archive):
title("CORE")
update_files(CORE_LIBS, locations, archive)
def run_android(locations, archive):
title("ANDROID")
update_files(ANDROID_LIBS, locations, archive)
def run_android_x86(locations, archive):
title("ANDROID_X86")
update_files(ANDROID_X86_LIBS, locations, archive)
def run_desktop(locations, archive):
title("DESKTOP")
update_files(DESKTOP_LIBS, locations, archive)
def run_gwt(locations, archive):
title("GWT")
update_files(GWT_LIBS, locations, archive)
def run_ios(locations, archive):
title("IOS-ROBOVM")
update_files(ROBOVM_LIBS, locations, archive)
def run_box2d(locations, archive):
title("EXTENSION: BOX2D")
update_files(BOX2D, locations, archive)
def run_bullet(locations, archive):
title("EXTENSION: BULLET")
update_files(BULLET, locations, archive)
def search_for_lib_locations(directory):
platforms = []
search_list = CORE_LIBS + DESKTOP_LIBS + ANDROID_LIBS + ANDROID_X86_LIBS + GWT_LIBS + ROBOVM_LIBS + BOX2D + BULLET
locations = {}
for element in search_list:
locations[element] = None
for (this_dir, dirs, files) in os.walk(directory):
for element in search_list:
split_path = os.path.split(element)
path = os.path.split(split_path[0])[-1]
filename = split_path[1]
for f in files:
match = False
if filename == f:
f_dir = os.path.split(this_dir)[-1]
if path == "":
match = True
else:
if path == f_dir:
match = True
if match:
if locations[element] != None:
print "WARNING: found %s in more than one place!" % element
if not FORCE:
answer = confirm("continue? (Y/n): ")
if answer not in YES:
fatal_error("USER ABORT")
locations[element] = this_dir
found_libraries = [lib for lib, loc in locations.items() if locations[lib] != None]
if found_all_in_set(CORE_LIBS, found_libraries):
platforms.append("core")
if found_all_in_set(ANDROID_LIBS, found_libraries):
platforms.append("android")
if found_all_in_set(ANDROID_X86_LIBS, found_libraries):
platforms.append("android_x86")
if found_all_in_set(DESKTOP_LIBS, found_libraries):
platforms.append("desktop")
if found_all_in_set(GWT_LIBS, found_libraries):
platforms.append("gwt")
if found_all_in_set(ROBOVM_LIBS, found_libraries):
platforms.append("ios")
if found_any_in_set(BOX2D, found_libraries):
platforms.append("box2d")
if found_any_in_set(BULLET, found_libraries):
platforms.append("bullet")
print "WARNING - did not find the following:"
for lib, loc in locations.items():
if loc == None:
print "\t%s not found" % lib
for lib, loc in locations.items():
if loc != None:
print "found %s -> %s" % (lib, loc)
return platforms, locations
def found_all_in_set(lib_set, found_list):
for lib in lib_set:
if lib not in found_list:
return False
return True
def found_any_in_set(lib_set, found_list):
for lib in lib_set:
if lib in found_list:
return True
return False
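# Hedged sketch (not part of the original script): how the two helpers above
# combine with the library lists to decide which platforms get updated.
def _platform_detection_example():
    found = ["gdx.jar", "gdx-sources.jar", "gdx-box2d.jar"]
    assert found_all_in_set(CORE_LIBS, found)         # every core lib present
    assert not found_all_in_set(DESKTOP_LIBS, found)  # desktop libs missing
    assert found_any_in_set(BOX2D, found)             # one box2d lib is enough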
def main():
global ARCHIVE
start_time = time.time()
print "finding local libraries in %s" % PROJECT_DIR
platforms, locations = search_for_lib_locations(PROJECT_DIR)
if "core" not in platforms:
fatal_error("did not find CORE libraries %s in project directory tree" % str(CORE_LIBS))
else:
print "found CORE libraries"
for supported in SUPPORTED_PLATFORMS:
if supported in platforms:
print "found libraries for platform: %s" % supported.upper()
else:
pass
#print "WARNING: did not find libraries for platform: %s - WILL NOT UPDATE" % supported.upper()
    if ARCHIVE is None:
        print "checking latest nightly..."
        mtime = get_remote_archive_mtime()
        print "latest nightly from server: %s" % mtime
if not FORCE:
answer = confirm("replace local libraries with files from latest nightly?(Y/n): ")
if answer not in YES:
fatal_error("USER QUIT")
libgdx = download_libgdx_zip()
else:
if not os.path.exists(ARCHIVE):
fatal_error("archive file not found: %s" % ARCHIVE)
if os.path.isdir(ARCHIVE):
selected = select_archive_from_dir(ARCHIVE)
if selected == None:
fatal_error("no archive chosen to update with, check supplied argument: '%s'" % ARCHIVE)
else:
ARCHIVE = selected
if not FORCE:
answer = confirm("replace local libraries with files from '%s'?(Y/n): " % os.path.basename(ARCHIVE))
if answer not in YES:
fatal_error("USER QUIT")
        libgdx = open(ARCHIVE, "rb")
with zipfile.ZipFile(libgdx) as archive:
if "core" in platforms:
run_core(locations, archive)
if "desktop" in platforms:
run_desktop(locations, archive)
if "android" in platforms:
run_android(locations, archive)
if "gwt" in platforms:
run_gwt(locations, archive)
if "ios" in platforms:
run_ios(locations, archive)
if "box2d" in platforms:
run_box2d(locations, archive)
if "bullet" in platforms:
            run_bullet(locations, archive)
duration = time.time() - start_time
print "finished updates in %s" % human_time(duration)
libgdx.close()
def find_zips(path):
    zips = []
    for item in os.listdir(path):
        pieces = item.split(".")
        if len(pieces) > 1 and pieces[-1] == "zip":
            zips.append(item)
    return zips
def select_archive_from_dir(archive_dir):
zips = find_zips(archive_dir)
if len(zips) == 0:
return None
for z in zips:
full_path = os.path.join(archive_dir, z)
answer = confirm("use archive '%s'? (Y/n): " % full_path)
if answer in YES:
return full_path
return None
def title(text):
dashes = "-" * 10
print dashes + " %s " % text + dashes
if __name__ == "__main__":
main()
|
|
import hashlib
import hmac
from .compat import binary_type, constant_time_compare, is_string_type
from .exceptions import InvalidKeyError
from .utils import der_to_raw_signature, raw_to_der_signature
try:
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey, RSAPublicKey
)
from cryptography.hazmat.primitives.asymmetric.ec import (
EllipticCurvePrivateKey, EllipticCurvePublicKey
)
from cryptography.hazmat.primitives.asymmetric import ec, padding
from cryptography.hazmat.backends import default_backend
from cryptography.exceptions import InvalidSignature
has_crypto = True
except ImportError:
has_crypto = False
def get_default_algorithms():
"""
Returns the algorithms that are implemented by the library.
"""
default_algorithms = {
'none': NoneAlgorithm(),
'HS256': HMACAlgorithm(HMACAlgorithm.SHA256),
'HS384': HMACAlgorithm(HMACAlgorithm.SHA384),
'HS512': HMACAlgorithm(HMACAlgorithm.SHA512)
}
if has_crypto:
default_algorithms.update({
'RS256': RSAAlgorithm(RSAAlgorithm.SHA256),
'RS384': RSAAlgorithm(RSAAlgorithm.SHA384),
'RS512': RSAAlgorithm(RSAAlgorithm.SHA512),
'ES256': ECAlgorithm(ECAlgorithm.SHA256),
'ES384': ECAlgorithm(ECAlgorithm.SHA384),
'ES512': ECAlgorithm(ECAlgorithm.SHA512),
'PS256': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256),
'PS384': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384),
'PS512': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA512)
})
return default_algorithms
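# --- Hedged usage sketch (not part of the library source) ----------------
# Shows how the registry returned above is typically consumed; 'secret' and
# the payload are illustrative values only.
def _hs256_roundtrip_example():
    algs = get_default_algorithms()
    alg = algs['HS256']
    key = alg.prepare_key('secret')
    sig = alg.sign(b'payload', key)
    return alg.verify(b'payload', key, sig)  # True when the signature matches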
class Algorithm(object):
"""
The interface for an algorithm used to sign and verify tokens.
"""
def prepare_key(self, key):
"""
Performs necessary validation and conversions on the key and returns
the key value in the proper format for sign() and verify().
"""
raise NotImplementedError
def sign(self, msg, key):
"""
Returns a digital signature for the specified message
using the specified key value.
"""
raise NotImplementedError
def verify(self, msg, key, sig):
"""
Verifies that the specified digital signature is valid
for the specified message and key values.
"""
raise NotImplementedError
class NoneAlgorithm(Algorithm):
"""
Placeholder for use when no signing or verification
operations are required.
"""
def prepare_key(self, key):
if key == '':
key = None
if key is not None:
raise InvalidKeyError('When alg = "none", key value must be None.')
return key
def sign(self, msg, key):
return b''
def verify(self, msg, key, sig):
return False
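# Note on the behaviour above: prepare_key('') and prepare_key(None) both
# yield None, while any other key raises InvalidKeyError; sign() returns an
# empty signature and verify() always fails.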
class HMACAlgorithm(Algorithm):
"""
Performs signing and verification operations using HMAC
and the specified hash function.
"""
SHA256 = hashlib.sha256
SHA384 = hashlib.sha384
SHA512 = hashlib.sha512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if not is_string_type(key):
raise TypeError('Expecting a string- or bytes-formatted key.')
if not isinstance(key, binary_type):
key = key.encode('utf-8')
invalid_strings = [
b'-----BEGIN PUBLIC KEY-----',
b'-----BEGIN CERTIFICATE-----',
b'ssh-rsa'
]
if any([string_value in key for string_value in invalid_strings]):
raise InvalidKeyError(
'The specified key is an asymmetric key or x509 certificate and'
' should not be used as an HMAC secret.')
return key
def sign(self, msg, key):
return hmac.new(key, msg, self.hash_alg).digest()
def verify(self, msg, key, sig):
return constant_time_compare(sig, self.sign(msg, key))
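# Hedged sketch (not part of the library source): the guard above refuses keys
# that look like asymmetric key material; the PEM header below is illustrative.
def _hmac_key_guard_example():
    alg = HMACAlgorithm(HMACAlgorithm.SHA256)
    try:
        alg.prepare_key('-----BEGIN PUBLIC KEY-----')
        return False
    except InvalidKeyError:
        return True  # asymmetric-looking keys are rejected as HMAC secrets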
if has_crypto:
class RSAAlgorithm(Algorithm):
"""
Performs signing and verification operations using
RSASSA-PKCS-v1_5 and the specified hash function.
"""
SHA256 = hashes.SHA256
SHA384 = hashes.SHA384
SHA512 = hashes.SHA512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if isinstance(key, RSAPrivateKey) or \
isinstance(key, RSAPublicKey):
return key
if is_string_type(key):
if not isinstance(key, binary_type):
key = key.encode('utf-8')
try:
if key.startswith(b'ssh-rsa'):
key = load_ssh_public_key(key, backend=default_backend())
else:
key = load_pem_private_key(key, password=None, backend=default_backend())
except ValueError:
key = load_pem_public_key(key, backend=default_backend())
else:
raise TypeError('Expecting a PEM-formatted key.')
return key
def sign(self, msg, key):
signer = key.signer(
padding.PKCS1v15(),
self.hash_alg()
)
signer.update(msg)
return signer.finalize()
def verify(self, msg, key, sig):
verifier = key.verifier(
sig,
padding.PKCS1v15(),
self.hash_alg()
)
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
class ECAlgorithm(Algorithm):
"""
Performs signing and verification operations using
ECDSA and the specified hash function
"""
SHA256 = hashes.SHA256
SHA384 = hashes.SHA384
SHA512 = hashes.SHA512
def __init__(self, hash_alg):
self.hash_alg = hash_alg
def prepare_key(self, key):
if isinstance(key, EllipticCurvePrivateKey) or \
isinstance(key, EllipticCurvePublicKey):
return key
if is_string_type(key):
if not isinstance(key, binary_type):
key = key.encode('utf-8')
# Attempt to load key. We don't know if it's
# a Signing Key or a Verifying Key, so we try
# the Verifying Key first.
try:
key = load_pem_public_key(key, backend=default_backend())
except ValueError:
key = load_pem_private_key(key, password=None, backend=default_backend())
else:
raise TypeError('Expecting a PEM-formatted key.')
return key
def sign(self, msg, key):
signer = key.signer(ec.ECDSA(self.hash_alg()))
signer.update(msg)
der_sig = signer.finalize()
return der_to_raw_signature(der_sig, key.curve)
def verify(self, msg, key, sig):
try:
der_sig = raw_to_der_signature(sig, key.curve)
except ValueError:
return False
verifier = key.verifier(der_sig, ec.ECDSA(self.hash_alg()))
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
class RSAPSSAlgorithm(RSAAlgorithm):
"""
Performs a signature using RSASSA-PSS with MGF1
"""
def sign(self, msg, key):
signer = key.signer(
padding.PSS(
mgf=padding.MGF1(self.hash_alg()),
salt_length=self.hash_alg.digest_size
),
self.hash_alg()
)
signer.update(msg)
return signer.finalize()
def verify(self, msg, key, sig):
verifier = key.verifier(
sig,
padding.PSS(
mgf=padding.MGF1(self.hash_alg()),
salt_length=self.hash_alg.digest_size
),
self.hash_alg()
)
verifier.update(msg)
try:
verifier.verify()
return True
except InvalidSignature:
return False
|