from . import tasks, util
from .extensions import db, login_mgr, micropub
from .models import Feed, Entry, User, Subscription
import flask_login
import binascii
import bs4
import datetime
import feedparser
import flask
import mf2py
import mf2util
import requests
import re
import urllib.parse
import html
import sqlalchemy
views = flask.Blueprint('views', __name__)
@views.route('/')
def index():
page = int(flask.request.args.get('page', 1))
entry_tups = []
ws_topic = None
solo = False
all_tags = set()
if flask_login.current_user.is_authenticated:
for subsc in flask_login.current_user.subscriptions:
if subsc.tags:
all_tags.update(subsc.tags.split())
per_page = flask.current_app.config.get('PER_PAGE', 30)
offset = (page - 1) * per_page
entry_query = db.session.query(Entry, Subscription)\
.options(
sqlalchemy.orm.subqueryload(Entry.feed),
sqlalchemy.orm.subqueryload(Entry.reply_context))\
.join(Entry.feed)\
.join(Feed.subscriptions)\
.join(Subscription.user)\
.filter(User.id == flask_login.current_user.id)
if 'entry' in flask.request.args:
entry_url = flask.request.args.get('entry')
entry_tup = entry_query.filter(Entry.permalink == entry_url)\
.order_by(Entry.retrieved.desc())\
.first()
if not entry_tup:
flask.abort(404)
entry_tups = [entry_tup]
solo = True
else:
if 'tag' in flask.request.args:
tag = flask.request.args.get('tag')
entry_query = entry_query.filter(
Subscription.tags.like('%{}%'.format(tag)))
elif 'subscription' in flask.request.args:
subsc_id = flask.request.args.get('subscription')
subsc = Subscription.query.get(subsc_id)
if not subsc:
flask.abort(404)
entry_query = entry_query.filter(Subscription.id == subsc_id)
ws_topic = 'subsc:{}'.format(subsc.id)
else:
ws_topic = 'user:{}'.format(flask_login.current_user.id)
entry_query = entry_query.order_by(Entry.retrieved.desc(),
Entry.published.desc())\
.offset(offset).limit(per_page)
entry_tups = entry_query.all()
# stick the subscription into the entry.
# FIXME this is hacky
entries = []
for entry, subsc in entry_tups:
entry.subscription = subsc
entries.append(entry)
entries = dedupe_copies(entries)
resp = flask.make_response(
flask.render_template('feed.jinja2', entries=entries, page=page,
ws_topic=ws_topic, solo=solo,
all_tags=all_tags))
resp.headers['Cache-control'] = 'max-age=0'
return resp
@views.route('/install')
def install():
db.create_all()
return 'Success!'
@views.route('/subscriptions')
@flask_login.login_required
def subscriptions():
subscs = Subscription\
.query\
.filter_by(user_id=flask_login.current_user.id)\
.options(sqlalchemy.orm.subqueryload(Subscription.feed))\
.order_by(db.func.lower(Subscription.name))\
.all()
return flask.render_template('subscriptions.jinja2',
subscriptions=subscs)
@views.route('/settings', methods=['GET', 'POST'])
@flask_login.login_required
def settings():
settings = flask_login.current_user.settings or {}
if flask.request.method == 'GET':
return flask.render_template('settings.jinja2', settings=settings)
settings = dict(settings)
reply_method = flask.request.form.get('reply-method')
settings['reply-method'] = reply_method
flask_login.current_user.settings = settings
db.session.commit()
next_page = '.settings'
if reply_method == 'micropub':
next_page = '.settings_micropub'
elif reply_method == 'indie-config':
next_page = '.settings_indie_config'
elif reply_method == 'action-urls':
next_page = '.settings_action_urls'
return flask.redirect(flask.url_for(next_page))
@views.route('/settings/micropub')
@flask_login.login_required
def settings_micropub():
settings = flask_login.current_user.settings or {}
return flask.render_template('settings_micropub.jinja2', settings=settings)
@views.route('/settings/indie-config', methods=['GET', 'POST'])
@flask_login.login_required
def settings_indie_config():
settings = flask_login.current_user.settings or {}
if flask.request.method == 'GET':
return flask.render_template('settings_indie_config.jinja2',
settings=settings)
settings = dict(settings)
settings['indie-config-actions'] = flask.request.form.getlist(
'indie-config-action')
flask_login.current_user.settings = settings
flask.current_app.logger.debug('new settings: %s', settings)
db.session.commit()
return flask.redirect(flask.url_for('.index'))
@views.route('/settings/action-urls', methods=['GET', 'POST'])
@flask_login.login_required
def settings_action_urls():
settings = flask_login.current_user.settings or {}
if flask.request.method == 'GET':
return flask.render_template('settings_action_urls.jinja2',
settings=settings)
settings = dict(settings)
zipped = zip(
flask.request.form.getlist('action'),
flask.request.form.getlist('action-url'))
settings['action-urls'] = [[k, v] for k, v in zipped if k and v]
flask_login.current_user.settings = settings
db.session.commit()
return flask.redirect(flask.url_for('.index'))
@views.route('/update_feed', methods=['POST'])
@flask_login.login_required
def update_feed():
feed_id = flask.request.form.get('id')
tasks.q.enqueue(tasks.update_feed, feed_id)
return flask.redirect(flask.url_for('.subscriptions'))
@views.route('/update_all', methods=['POST'])
@flask_login.login_required
def update_all():
for s in flask_login.current_user.subscriptions:
tasks.q.enqueue(tasks.update_feed, s.feed.id)
return flask.redirect(flask.url_for('.subscriptions'))
@views.route('/unsubscribe', methods=['POST'])
@flask_login.login_required
def unsubscribe():
subsc_id = flask.request.form.get('id')
subsc = Subscription.query.get(subsc_id)
subsc_name = subsc.name
db.session.delete(subsc)
db.session.commit()
flask.flash('Unsubscribed {}'.format(subsc_name))
return flask.redirect(flask.url_for('.subscriptions'))
@views.route('/edit_subscription', methods=['POST'])
@flask_login.login_required
def edit_subscription():
subsc_id = flask.request.form.get('id')
subsc_name = flask.request.form.get('name')
subsc_tags = flask.request.form.get('tags')
subsc = Subscription.query.get(subsc_id)
if subsc_name:
subsc.name = subsc_name
if subsc_tags:
tag_list = re.split(r'(?:\s|,)+', subsc_tags)
subsc.tags = ' '.join(t.strip() for t in tag_list if t.strip())
else:
subsc.tags = None
db.session.commit()
flask.flash('Edited {}'.format(subsc.name))
return flask.redirect(flask.url_for('.subscriptions'))
@views.route('/logout')
def logout():
flask_login.logout_user()
return flask.redirect(flask.url_for('.index'))
@views.route('/login', methods=['POST'])
def login():
me = flask.request.form.get('me')
if not me or me == 'http://':
flask.flash('Sign in with your personal web address.')
return flask.redirect(flask.url_for('.index'))
return micropub.authenticate(
me=me, next_url=flask.request.form.get('next'))
@views.route('/login-callback')
@micropub.authenticated_handler
def login_callback(resp):
if not resp.me:
flask.flash(html.escape('Login error: ' + resp.error))
return flask.redirect(flask.url_for('.index'))
if resp.error:
flask.flash(html.escape('Warning: ' + resp.error))
user = load_user(resp.me)
if not user:
user = User(url=resp.me)
db.session.add(user)
db.session.commit()
flask_login.login_user(user, remember=True)
update_micropub_syndicate_to()
return flask.redirect(resp.next_url or flask.url_for('.index'))
@views.route('/authorize')
@flask_login.login_required
def authorize():
return micropub.authorize(
me=flask_login.current_user.url,
next_url=flask.request.args.get('next'),
scope='post')
@views.route('/micropub-callback')
@micropub.authorized_handler
def micropub_callback(resp):
if not resp.me or resp.error:
flask.flash(html.escape('Authorize error: ' + resp.error))
return flask.redirect(flask.url_for('.index'))
user = load_user(resp.me)
if not user:
flask.flash(html.escape('Unknown user for url: ' + resp.me))
return flask.redirect(flask.url_for('.index'))
user.micropub_endpoint = resp.micropub_endpoint
user.access_token = resp.access_token
db.session.commit()
update_micropub_syndicate_to()
flask.flash('Logged in as ' + user.url)
return flask.redirect(resp.next_url or flask.url_for('.index'))
@flask_login.login_required
def update_micropub_syndicate_to():
endpt = flask_login.current_user.micropub_endpoint
token = flask_login.current_user.access_token
if not endpt or not token:
return
resp = util.requests_get(endpt, params={
'q': 'syndicate-to',
}, headers={
'Authorization': 'Bearer ' + token,
})
if resp.status_code // 100 != 2:
flask.current_app.logger.warn(
'Unexpected response querying micropub endpoint %s: %s',
resp, resp.text)
return
syndicate_tos = urllib.parse.parse_qs(resp.text).get('syndicate-to[]', [])
flask_login.current_user.set_setting('syndicate-to', syndicate_tos)
db.session.commit()
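# Illustrative sketch (made-up values) of the per-user settings dict the
# handlers above build up: 'reply-method' is chosen on /settings, and the
# other keys are written by the micropub, indie-config and action-url pages.
_EXAMPLE_USER_SETTINGS = {
    'reply-method': 'micropub',
    'indie-config-actions': ['reply', 'like'],            # made-up action names
    'action-urls': [['reply', 'https://example.com/reply']],
    'syndicate-to': ['https://twitter.com/example'],      # made-up target
}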
@views.route('/deauthorize')
@flask_login.login_required
def deauthorize():
flask_login.current_user.micropub_endpoint = None
flask_login.current_user.access_token = None
db.session.commit()
return flask.redirect(flask.request.args.get('next')
or flask.url_for('.index'))
@login_mgr.user_loader
def load_user(url):
alt = url.rstrip('/') if url.endswith('/') else url + '/'
return User.query.filter(
(User.url == url) | (User.url == alt)).first()
@views.route('/subscribe', methods=['GET', 'POST'])
@flask_login.login_required
def subscribe():
origin = (flask.request.form.get('origin')
or flask.request.args.get('origin'))
if origin:
type = None
feed = None
typed_feed = flask.request.form.get('feed')
if typed_feed:
type, feed = typed_feed.split('|', 1)
else:
feeds = find_possible_feeds(origin)
if not feeds:
flask.flash('No feeds found for: ' + origin)
return flask.redirect(flask.url_for('.index'))
if len(feeds) > 1:
return flask.render_template(
'select-feed.jinja2', origin=origin, feeds=feeds)
feed = feeds[0]['feed']
type = feeds[0]['type']
new_feed = add_subscription(origin, feed, type)
flask.flash('Successfully subscribed to: {}'.format(new_feed.name))
return flask.redirect(flask.url_for('.index'))
if flask.request.method == 'POST':
flask.abort(400)
return flask.render_template('subscribe.jinja2')
def add_subscription(origin, feed_url, type, tags=None):
feed = Feed.query.filter_by(feed=feed_url, type=type).first()
if not feed:
name = None
if type == 'html':
flask.current_app.logger.debug('mf2py parsing %s', feed_url)
resp = util.requests_get(feed_url)
feed_text = resp.text if 'charset' in resp.headers.get('content-type', '') else resp.content
parsed = mf2util.interpret_feed(
mf2py.parse(doc=feed_text, url=feed_url), feed_url)
name = parsed.get('name')
elif type == 'xml':
flask.current_app.logger.debug('feedparser parsing %s', feed_url)
parsed = feedparser.parse(feed_url, agent=util.USER_AGENT)
if parsed.feed:
name = parsed.feed.get('title')
else:
flask.current_app.logger.error('unknown feed type %s', type)
flask.abort(400)
if not name:
p = urllib.parse.urlparse(origin)
name = p.netloc + p.path
feed = Feed(name=name[:140], origin=origin, feed=feed_url, type=type)
if feed:
db.session.add(feed)
flask_login.current_user.subscriptions.append(
Subscription(feed=feed, name=feed.name, tags=tags))
db.session.commit()
# go ahead and update the feed
tasks.q.enqueue(tasks.update_feed, feed.id)
return feed
def find_possible_feeds(origin):
# scrape an origin source to find possible alternative feeds
try:
resp = util.requests_get(origin)
except requests.exceptions.RequestException as e:
flask.flash('Error fetching source {}'.format(repr(e)))
flask.current_app.logger.warn(
'Subscribe failed for %s with error %s', origin, repr(e))
return None
feeds = []
xml_feed_types = [
'application/rss+xml',
'application/atom+xml',
'application/rdf+xml',
'application/xml',
'text/xml',
]
xml_mime_types = xml_feed_types + [
'text/rss+xml',
'text/atom+xml',
]
content_type = resp.headers['content-type']
content_type = content_type.split(';', 1)[0].strip()
if content_type in xml_mime_types:
feeds.append({
'origin': origin,
'feed': origin,
'type': 'xml',
'title': 'untitled xml feed',
})
elif content_type == 'text/html':
parsed = mf2py.parse(doc=resp.text, url=origin)
# if text/html, then parse and look for h-entries
hfeed = mf2util.interpret_feed(parsed, origin)
if hfeed.get('entries'):
ftitle = hfeed.get('name') or 'untitled h-feed'
feeds.append({
'origin': origin,
'feed': resp.url,
'type': 'html',
'title': ftitle[:140]
})
# look for link="feed"
for furl in parsed.get('rels', {}).get('feed', []):
fprops = parsed.get('rel-urls', {}).get(furl, {})
if not fprops.get('type') or fprops.get('type') == 'text/html':
feeds.append({
'origin': origin,
'feed': furl,
'type': 'html',
'title': fprops.get('title'),
})
# then look for link rel="alternate"
for link in parsed.get('alternates', []):
if link.get('type') in xml_feed_types:
feeds.append({
'origin': origin,
'feed': link.get('url'),
'type': 'xml',
'title': link.get('title'),
})
return feeds
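# Illustrative sketch (made-up URLs) of the list find_possible_feeds() returns:
# one dict per candidate with the keys subscribe() relies on ('origin', 'feed',
# 'type', 'title'). With more than one candidate the user is shown
# select-feed.jinja2; with exactly one, add_subscription() is called directly.
_EXAMPLE_FEED_CANDIDATES = [
    {'origin': 'https://example.com/', 'feed': 'https://example.com/',
     'type': 'html', 'title': 'untitled h-feed'},
    {'origin': 'https://example.com/', 'feed': 'https://example.com/feed.xml',
     'type': 'xml', 'title': 'Example Feed'},
]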
@views.app_template_filter()
def prettify_url(url):
parsed = urllib.parse.urlparse(url)
if parsed.path:
return parsed.netloc + parsed.path
return parsed.netloc
@views.app_template_filter()
def domain_for_url(url):
parsed = urllib.parse.urlparse(url)
return parsed.netloc
@views.app_template_filter()
def favicon_for_url(url):
parsed = urllib.parse.urlparse(url)
return '//www.google.com/s2/favicons?domain={}'.format(parsed.netloc)
@views.app_template_filter()
def relative_time(dt):
if dt:
now = datetime.datetime.utcnow()
diff = now - dt
zero = datetime.timedelta(0)
if diff == zero:
pretty = 'Right now'
elif diff > zero:
years = diff.days // 365
hours = diff.seconds // 60 // 60
minutes = diff.seconds // 60
if years > 1:
pretty = str(years) + ' years ago'
elif diff.days == 1:
pretty = 'A day ago'
elif diff.days > 1:
pretty = str(diff.days) + ' days ago'
elif hours == 1:
pretty = 'An hour ago'
elif hours > 1:
pretty = str(hours) + ' hours ago'
elif minutes == 1:
pretty = 'A minute ago'
elif minutes > 1:
pretty = str(minutes) + ' minutes ago'
else:
pretty = str(diff.seconds) + ' seconds ago'
else:
diff = abs(diff)
years = diff.days // 365
hours = diff.seconds // 60 // 60
minutes = diff.seconds // 60
if years > 1:
pretty = str(years) + ' years from now'
elif diff.days == 1:
pretty = 'A day from now'
elif diff.days > 1:
pretty = str(diff.days) + ' days from now'
elif hours == 1:
pretty = 'An hour from now'
elif hours > 1:
pretty = str(hours) + ' hours from now'
elif minutes == 1:
pretty = 'A minute from now'
elif minutes > 1:
pretty = str(minutes) + ' minutes from now'
else:
pretty = str(diff.seconds) + ' seconds from now'
return '<time datetime="{}">{}</time>'.format(dt.isoformat(), pretty)
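# A minimal usage sketch for the relative_time filter (illustrative input):
# datetimes are compared against utcnow() and rendered as a human-friendly
# <time> element.
def _relative_time_example():
    """Illustrative only; not registered anywhere."""
    two_hours_ago = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
    return relative_time(two_hours_ago)
    # -> '<time datetime="...">2 hours ago</time>'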
@views.app_template_filter()
def isoformat(dt):
return dt and dt.isoformat()
@views.app_template_filter()
def add_preview(content):
"""If a post ends with the URL of a known media source (youtube,
instagram, etc.), add the content inline.
"""
if any('<' + tag in content for tag in (
'img', 'iframe', 'embed', 'audio', 'video')):
# don't add a preview to a post that already has one
return content
instagram_regex = r'https?://(?:www\.)?instagram\.com/p/[\w\-]+/?'
vimeo_regex = r'https?://vimeo\.com/(\d+)/?'
youtube_regex = r'https?://(?:www\.)?youtube\.com/watch\?v=([\w\-]+)'
m = re.search(instagram_regex, content)
if m:
ig_url = m.group(0)
media_url = urllib.parse.urljoin(ig_url, 'media/?size=l')
return '{}<a href="{}"><img src="{}" /></a>'.format(
content, ig_url, media_url)
m = re.search(vimeo_regex, content)
if m:
# vimeo_url = m.group(0)
vimeo_id = m.group(1)
return (
'{}<iframe src="//player.vimeo.com/video/{}" width="560" '
'height="315" frameborder="0" webkitallowfullscreen '
'mozallowfullscreen allowfullscreen></iframe>'
).format(content, vimeo_id)
m = re.search(youtube_regex, content)
if m:
youtube_id = m.group(1)
return (
'{}<iframe width="560" height="315" '
'src="https://www.youtube.com/embed/{}" frameborder="0" '
'allowfullscreen></iframe>'
).format(content, youtube_id)
return content
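# A minimal usage sketch for the add_preview filter (hypothetical content):
# a post ending in a YouTube watch URL gains an inline <iframe> embed, while
# content that already contains media tags comes back unchanged.
def _add_preview_example():
    """Illustrative only; not wired into any route or template."""
    plain = 'Great talk: https://www.youtube.com/watch?v=abc123'
    embedded = add_preview(plain)                    # appends a YouTube iframe
    untouched = add_preview('<img src="x.jpg" />')   # already has media
    return embedded, untouched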
@views.app_template_global()
def url_for_other_page(page):
"""http://flask.pocoo.org/snippets/44/#URL+Generation+Helper
"""
args = flask.request.view_args.copy()
args.update(flask.request.args)
args['page'] = page
return flask.url_for(flask.request.endpoint, **args)
def dedupe_copies(entries):
all_copies = set()
for entry in entries:
syndurls = entry.get_property('syndication')
if syndurls:
copies = [e for e in entries if e.permalink in syndurls]
entry._syndicated_copies = copies
all_copies.update(copies)
return [e for e in entries if e not in all_copies]
@views.app_template_test('syndicated_to')
def is_syndicated_to(entry, target):
def same_domain(u1, u2):
return domain_for_url(u1) == domain_for_url(u2)
return same_domain(entry.permalink, target) or any(
same_domain(syndurl, target)
for syndurl in entry.get_property('syndication', []))
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.log import Logger
from txweb2.client.http import ClientRequest
from txweb2.dav.util import allDataFromStream, joinURL
from txweb2.http import Response
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.client.geturl import getURL
from twistedcaldav.config import ConfigurationError
from twistedcaldav.simpleresource import SimpleResource, SimpleDataResource
from txdav.caldav.datastore.scheduling.ischedule.utils import lookupDataViaTXT, \
lookupServerViaSRV
from Crypto.Hash import SHA, SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import base64
import hashlib
import os
import textwrap
import time
import uuid
"""
DKIM HTTP message generation and validation.
"""
log = Logger()
# DKIM/iSchedule Constants
RSA1 = "rsa-sha1"
RSA256 = "rsa-sha256"
Q_DNS = "dns/txt"
Q_HTTP = "http/well-known"
Q_PRIVATE = "private-exchange"
KEY_SERVICE_TYPE = "ischedule"
# Headers
DKIM_SIGNATURE = "DKIM-Signature"
ISCHEDULE_VERSION = "iSchedule-Version"
ISCHEDULE_VERSION_VALUE = "1.0"
ISCHEDULE_MESSAGE_ID = "iSchedule-Message-ID"
ISCHEDULE_CAPABILITIES = "iSchedule-Capabilities"
class DKIMUtils(object):
"""
Some useful functions.
"""
@staticmethod
def validConfiguration(config):
if config.Scheduling.iSchedule.DKIM.Enabled:
if not config.Scheduling.iSchedule.DKIM.Domain and not config.ServerHostName:
msg = "DKIM: No domain specified"
log.error(msg)
raise ConfigurationError(msg)
if not config.Scheduling.iSchedule.DKIM.KeySelector:
msg = "DKIM: No selector specified"
log.error(msg)
raise ConfigurationError(msg)
if config.Scheduling.iSchedule.DKIM.SignatureAlgorithm not in (RSA1, RSA256):
msg = "DKIM: Invalid algorithm: %s" % (config.Scheduling.iSchedule.SignatureAlgorithm,)
log.error(msg)
raise ConfigurationError(msg)
try:
with open(config.Scheduling.iSchedule.DKIM.PrivateKeyFile) as f:
key_data = f.read()
except IOError as e:
msg = "DKIM: Cannot read private key file: %s %s" % (config.Scheduling.iSchedule.DKIM.PrivateKeyFile, e,)
log.error(msg)
raise ConfigurationError(msg)
try:
RSA.importKey(key_data)
except:
msg = "DKIM: Invalid private key file: %s" % (config.Scheduling.iSchedule.DKIM.PrivateKeyFile,)
log.error(msg)
raise ConfigurationError(msg)
try:
with open(config.Scheduling.iSchedule.DKIM.PublicKeyFile) as f:
key_data = f.read()
except IOError as e:
msg = "DKIM: Cannot read public key file: %s %s" % (config.Scheduling.iSchedule.DKIM.PublicKeyFile, e,)
log.error(msg)
raise ConfigurationError(msg)
try:
RSA.importKey(key_data)
except:
msg = "DKIM: Invalid public key file: %s" % (config.Scheduling.iSchedule.DKIM.PublicKeyFile,)
log.error(msg)
raise ConfigurationError(msg)
if config.Scheduling.iSchedule.DKIM.PrivateExchanges:
if not os.path.exists(config.Scheduling.iSchedule.DKIM.PrivateExchanges):
try:
os.makedirs(config.Scheduling.iSchedule.DKIM.PrivateExchanges)
except (IOError, OSError) as e:
msg = "DKIM: Cannot create public key private exchange directory: %s" % (config.Scheduling.iSchedule.DKIM.PrivateExchanges,)
log.error(msg)
raise ConfigurationError(msg)
if not os.path.isdir(config.Scheduling.iSchedule.DKIM.PrivateExchanges):
msg = "DKIM: Invalid public key private exchange directory: %s" % (config.Scheduling.iSchedule.DKIM.PrivateExchanges,)
log.error(msg)
raise ConfigurationError(msg)
PublicKeyLookup_PrivateExchange.directory = config.Scheduling.iSchedule.DKIM.PrivateExchanges
log.info("DKIM: Enabled")
else:
log.info("DKIM: Disabled")
@staticmethod
def getConfiguration(config):
"""
Return a tuple of the parameters derived from the config that are used to initialize the DKIMRequest.
@param config: configuration to look at
@type config: L{Config}
"""
domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName
selector = config.Scheduling.iSchedule.DKIM.KeySelector
key_file = config.Scheduling.iSchedule.DKIM.PrivateKeyFile
algorithm = config.Scheduling.iSchedule.DKIM.SignatureAlgorithm
useDNSKey = config.Scheduling.iSchedule.DKIM.UseDNSKey
useHTTPKey = config.Scheduling.iSchedule.DKIM.UseHTTPKey
usePrivateExchangeKey = config.Scheduling.iSchedule.DKIM.UsePrivateExchangeKey
expire = config.Scheduling.iSchedule.DKIM.ExpireSeconds
return domain, selector, key_file, algorithm, useDNSKey, useHTTPKey, usePrivateExchangeKey, expire
@staticmethod
def hashlib_method(algorithm):
"""
Return hashlib function for DKIM algorithm.
"""
return {
RSA1: hashlib.sha1,
RSA256: hashlib.sha256,
}[algorithm]
@staticmethod
def hash_name(algorithm):
"""
Return RSA hash name for DKIM algorithm.
"""
return {
RSA1: "SHA-1",
RSA256: "SHA-256",
}[algorithm]
@staticmethod
def hash_func(algorithm):
"""
Return the PyCrypto hash module for the DKIM algorithm.
"""
return {
RSA1: SHA,
RSA256: SHA256,
}[algorithm]
@staticmethod
def extractTags(data):
"""
Split a DKIM tag list into a dict, removing unneeded whitespace.
"""
# Extract tags from the data
splits = [item.strip() for item in data.split(";")]
dkim_tags = {}
for item in splits:
try:
name, value = item.split("=", 1)
dkim_tags[name.strip()] = value.strip()
except ValueError:
pass
return dkim_tags
@staticmethod
def canonicalizeHeader(name, value, dkim_tags=None, method="ischedule-relaxed"):
if method == "relaxed":
return DKIMUtils.relaxedHeader(name, value, dkim_tags)
elif method == "ischedule-relaxed":
return DKIMUtils.ischeduleHeader(name, value, dkim_tags)
else:
assert "Invalid header canonicalization method: %s" % (method,)
@staticmethod
def relaxedHeader(name, value, dkim_tags=None):
"""
Canonicalize the header using "relaxed" method. Optionally remove the b= value from
any DKIM-Signature present.
FIXME: this needs to be smarter about where valid WSP can occur in a header. Right now it will
blindly collapse all runs of SP/HTAB into a single SP. That could be wrong if a legitimate sequence of
SP/HTAB occurs in a header value.
@param name: header name
@type name: C{str}
@param value: header value
@type value: C{str}
@param dkim_tags: the extracted DKIM tags, or C{None} if no removal needed
@type dkim_tags: C{dict} or C{None}
"""
# Special case DKIM-Signature: remove the b= value for signature
name = name.lower()
if dkim_tags is not None and name == DKIM_SIGNATURE.lower():
value = DKIMUtils.canonicalizeDKIMHeaderFields(value, dkim_tags)
# Basic relaxed behavior
value = " ".join(value.split())
crlf = "" if name == DKIM_SIGNATURE.lower() else "\r\n"
return "%s:%s%s" % (name, value, crlf)
@staticmethod
def ischeduleHeader(name, value, dkim_tags=None):
"""
Canonicalize the header using "ischedule-relaxed" method. Optionally remove the b= value from
any DKIM-Signature present.
FIXME: this needs to be smarter about where valid WSP can occur in a header. Right now it will
blindly collapse all runs of SP/HTAB into a single SP. That could be wrong if a legitimate sequence of
SP/HTAB occurs in a header value.
@param name: header name
@type name: C{str}
@param value: header value
@type value: C{str}
@param dkim_tags: the extracted DKIM tags, or C{None} if no removal needed
@type dkim_tags: C{dict} or C{None}
"""
# Special case DKIM-Signature: remove the b= value for signature
name = name.lower()
if dkim_tags is not None and name == DKIM_SIGNATURE.lower():
value = DKIMUtils.canonicalizeDKIMHeaderFields(value, dkim_tags)
# Basic relaxed behavior
value = " ".join(value.split())
value = value.replace(" ,", ",")
value = value.replace(", ", ",")
crlf = "" if name == DKIM_SIGNATURE.lower() else "\r\n"
return "%s:%s%s" % (name, value, crlf)
@staticmethod
def canonicalizeDKIMHeaderFields(value, dkim_tags):
"""
DKIM-Signature b= value needs to be stripped.
@param value: header value to process
@type value: C{str}
"""
pos = value.find(dkim_tags["b"])
value = value[:pos] + value[pos + len(dkim_tags["b"]):]
value = " ".join(value.split())
return value
@staticmethod
def canonicalizeBody(data):
"""
DKIM simple body canonicalization: remove empty lines at the end
and ensure it ends with one \r\n.
@param data: data to canonicalize
@type data: L{str}
"""
while data.endswith("\r\n"):
data = data[:-2]
data += "\r\n"
return data
@staticmethod
def sign(data, privkey, hashfunc):
h = hashfunc.new(data)
signer = PKCS1_v1_5.new(privkey)
return base64.b64encode(signer.sign(h))
@staticmethod
def verify(data, signature, pubkey, hashfunc):
h = hashfunc.new(data)
verifier = PKCS1_v1_5.new(pubkey)
if not verifier.verify(h, base64.b64decode(signature)):
raise ValueError()
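# A minimal sketch (made-up values, illustrative only) of the DKIMUtils helpers
# defined above: extractTags() parses a DKIM tag list into a dict,
# canonicalizeHeader()/canonicalizeBody() produce the data that gets signed,
# and sign()/verify() round-trip that data. The RSA key is generated on the
# fly purely for the example; the real code loads keys from configured files.
def _dkimutils_example():
    """Illustrative only; not used by the scheduling code."""
    tags = DKIMUtils.extractTags("v=1; a=rsa-sha256; d=example.com; s=ischedule")
    # -> {"v": "1", "a": "rsa-sha256", "d": "example.com", "s": "ischedule"}

    header = DKIMUtils.canonicalizeHeader("Originator", "  mailto:user@example.com ")
    # -> "originator:mailto:user@example.com\r\n" (ischedule-relaxed)

    body = DKIMUtils.canonicalizeBody("BEGIN:VCALENDAR\r\nEND:VCALENDAR\r\n\r\n")
    # trailing blank lines collapsed so the body ends with a single CRLF

    key = RSA.generate(2048)
    hashfunc = DKIMUtils.hash_func(RSA256)
    signature = DKIMUtils.sign(header + body, key, hashfunc)
    DKIMUtils.verify(header + body, signature, key.publickey(), hashfunc)
    # verify() raises ValueError if the signature does not match
    return tags, signature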
class DKIMRequest(ClientRequest):
"""
A ClientRequest that optionally creates a DKIM signature.
"""
keys = {}
def __init__(
self,
method,
uri,
headers,
stream,
domain,
selector,
key_file,
algorithm,
sign_headers,
useDNSKey,
useHTTPKey,
usePrivateExchangeKey,
expire,
):
"""
Create a DKIM request, which is a regular client request with the additional information needed to sign the message.
@param method: HTTP method to use
@type method: C{str}
@param uri: request-URI
@type uri: C{str}
@param headers: request headers
@type headers: L{http_headers}
@param stream: body data
@type stream: L{Stream}
@param domain: the signing domain
@type domain: C{str}
@param selector: the signing key selector
@type selector: C{str}
@param key_file: path to a private key file
@type key_file: C{str}
@param algorithm: the signing algorithm to use
@type algorithm: C{str}
@param sign_headers: list of header names to sign - to "over sign" a header append a "+" to the name
@type sign_headers: C{tuple}
@param useDNSKey: whether or not to add DNS TXT lookup as a key lookup option
@type useDNSKey: C{bool}
@param useHTTPKey: whether or not to add HTTP .well-known as a key lookup option
@type useHTTPKey: C{bool}
@param usePrivateExchangeKey: whether or not to add private-exchange as a key lookup option
@type usePrivateExchangeKey: C{bool}
@param expire: number of seconds to expiration of signature
@type expire: C{int}
"""
super(DKIMRequest, self).__init__(method, uri, headers, stream)
self.domain = domain
self.selector = selector
self.algorithm = algorithm
self.key_file = key_file
self.sign_headers = sign_headers
self.time = str(int(time.time()))
self.expire = str(int(time.time() + expire))
assert self.domain
assert self.selector
assert self.algorithm in (RSA1, RSA256,)
assert useDNSKey or useHTTPKey or usePrivateExchangeKey
self.hash_method = DKIMUtils.hashlib_method(self.algorithm)
self.hash_name = DKIMUtils.hash_name(self.algorithm)
self.hash_func = DKIMUtils.hash_func(self.algorithm)
self.keyMethods = []
if usePrivateExchangeKey:
self.keyMethods.append(Q_PRIVATE)
if useHTTPKey:
self.keyMethods.append(Q_HTTP)
if useDNSKey:
self.keyMethods.append(Q_DNS)
self.message_id = str(uuid.uuid4())
@inlineCallbacks
def sign(self):
"""
Generate the DKIM headers by signing the request. This should only be called once on the request and there must
be no changes to the request (no headers, no body change) after it is called.
"""
# Get the headers and the DKIM-Signature tags
headers, dkim_tags = (yield self.signatureHeaders())
# Sign the hash
signature = self.generateSignature(headers)
# Complete the header
dkim_tags[-1] = ("b", signature,)
dkim_header = "; ".join(["%s=%s" % item for item in dkim_tags])
self.headers.addRawHeader(DKIM_SIGNATURE, dkim_header)
log.debug("DKIM: Generated header: DKIM-Signature:{hdr}", hdr=dkim_header)
log.debug("DKIM: Signed headers:\n{hdrs}", hdrs=headers)
returnValue(signature)
@inlineCallbacks
def bodyHash(self):
"""
Generate the hash of the request body data.
"""
# We need to play a trick with the request stream as we can only read it once. So we
# read it, store the value in a MemoryStream, and replace the request's stream with that,
# so the data can be read again.
data = (yield allDataFromStream(self.stream))
self.stream = MemoryStream(data if data is not None else "")
self.stream.doStartReading = None
returnValue(base64.b64encode(self.hash_method(DKIMUtils.canonicalizeBody(data)).digest()))
@inlineCallbacks
def signatureHeaders(self):
"""
Generate the headers that are going to be signed as well as the DKIM-Signature tags.
"""
# Make sure we have the required iSchedule headers
self.headers.addRawHeader(ISCHEDULE_VERSION, ISCHEDULE_VERSION_VALUE)
self.headers.addRawHeader(ISCHEDULE_MESSAGE_ID, self.message_id)
self.sign_headers += (ISCHEDULE_VERSION, ISCHEDULE_MESSAGE_ID,)
# Need Cache-Control
self.headers.setRawHeaders("Cache-Control", ("no-cache", "no-transform",))
# Figure out all the existing headers to sign
headers = []
sign_headers = []
raw = dict([(name.lower(), values) for name, values in self.headers.getAllRawHeaders()])
for name in self.sign_headers:
# ischedule-relaxed canonicalization requires headers with the same name concatenated
# with a comma in between
value = ",".join(raw.get(name.lower(), ()))
headers.append(DKIMUtils.canonicalizeHeader(name, value))
sign_headers.append(name)
# Generate the DKIM header tags we care about
dkim_tags = []
dkim_tags.append(("v", "1",))
dkim_tags.append(("d", self.domain,))
dkim_tags.append(("s", self.selector,))
dkim_tags.append(("t", self.time,))
dkim_tags.append(("x", self.expire,))
dkim_tags.append(("a", self.algorithm,))
dkim_tags.append(("q", ":".join(self.keyMethods),))
dkim_tags.append(("c", "ischedule-relaxed/simple",))
dkim_tags.append(("h", ":".join(sign_headers),))
dkim_tags.append(("bh", (yield self.bodyHash()),))
dkim_tags.append(("b", "",))
dkim_header = "; ".join(["%s=%s" % item for item in dkim_tags])
headers.append(DKIMUtils.canonicalizeHeader(DKIM_SIGNATURE, dkim_header))
headers = "".join(headers)
returnValue((headers, dkim_tags,))
def generateSignature(self, headers):
# Sign the hash
if self.key_file not in self.keys:
with open(self.key_file) as f:
key = f.read()
self.keys[self.key_file] = RSA.importKey(key)
return DKIMUtils.sign(headers, self.keys[self.key_file], self.hash_func)
class DKIMMissingError(Exception):
"""
Used to indicate that the DKIM-Signature header is not present when
attempting verification.
"""
pass
class DKIMVerificationError(Exception):
"""
Used to indicate a DKIM verification error.
"""
pass
class DKIMVerifier(object):
"""
Class used to verify an DKIM-signed HTTP request.
"""
def __init__(self, headers, body, key_lookup=None, protocol_debug=False):
"""
@param headers: The HTTP request headers to process
@type headers: L{txweb2.http_headers.Headers}
@param body: The HTTP request body to process
@type body: C{str}
"""
self.headers = headers
self.body = body
self._debug = protocol_debug
self.dkim_tags = {}
# Prefer private exchange over HTTP over DNS when multiple are present
self.key_lookup_methods = (
PublicKeyLookup_PrivateExchange,
PublicKeyLookup_HTTP_WellKnown,
PublicKeyLookup_DNSTXT,
) if key_lookup is None else key_lookup
self.time = int(time.time())
@inlineCallbacks
def verify(self):
"""
@raise: DKIMVerificationError
"""
# Check presence of DKIM header
self.processDKIMHeader()
# Extract the set of canonicalized headers being signed
headers = self.extractSignedHeaders()
log.debug("DKIM: Signed headers:\n{hdrs}", hdrs=headers)
# Locate the public key
pubkey = (yield self.locatePublicKey())
if pubkey is None:
raise DKIMVerificationError("No public key to verify the DKIM signature")
# Do header verification
try:
DKIMUtils.verify(headers, self.dkim_tags["_b"], pubkey, self.hash_func)
except ValueError:
msg = "Could not verify signature"
_debug_msg = """
DKIM-Signature:%s
Headers to evaluate:
%s
Public key used:
%s
""" % (self.headers.getRawHeaders(DKIM_SIGNATURE)[0], headers, pubkey._original_data,)
log.debug("DKIM: {msg}:{debug}", msg=msg, debug=_debug_msg)
if self._debug:
msg = "%s:%s" % (msg, _debug_msg,)
raise DKIMVerificationError(msg)
# Do body validation
body = DKIMUtils.canonicalizeBody(self.body)
bh = base64.b64encode(self.hash_method(body).digest())
if bh != self.dkim_tags["_bh"]:
msg = "Could not verify the DKIM body hash"
_debug_msg = """
DKIM-Signature:%s
Hash Method: %s
Base64 encoded body:
%s
""" % (self.headers.getRawHeaders(DKIM_SIGNATURE), self.hash_method.__name__, base64.b64encode(body),)
log.debug("DKIM: {msg}:{debug}", msg=msg, debug=_debug_msg)
if self._debug:
msg = "%s:%s" % (msg, _debug_msg,)
raise DKIMVerificationError(msg)
def processDKIMHeader(self):
"""
Extract the DKIM-Signature header and process the tags.
@raise: DKIMVerificationError
"""
# Check presence of header
dkim = self.headers.getRawHeaders(DKIM_SIGNATURE)
if dkim is None:
msg = "No DKIM-Signature header present in the request"
log.debug("DKIM: {msg}", msg=msg)
raise DKIMMissingError(msg)
if len(dkim) != 1:
# TODO: This might need to be changed if we ever support forwarding of iSchedule messages - the forwarder
# might also sign the message and add its own header
msg = "Only one DKIM-Signature allowed in the request"
log.debug("DKIM: {msg}", msg=msg)
raise DKIMVerificationError(msg)
dkim = dkim[0]
log.debug("DKIM: Found header: DKIM-Signature:{hdr}", hdr=dkim)
# Extract tags from the header
self.dkim_tags = DKIMUtils.extractTags(dkim)
# Verify validity of tags
required_tags = ("v", "a", "b", "bh", "c", "d", "h", "s",)
for tag in required_tags:
if tag not in self.dkim_tags:
msg = "Missing DKIM-Signature tag: %s" % (tag,)
log.debug("DKIM: {msg}", msg=msg)
raise DKIMVerificationError(msg)
check_values = {
"v": ("1",),
"a": (RSA1, RSA256,),
"c": ("ischedule-relaxed", "ischedule-relaxed/simple",),
"q": (Q_DNS, Q_HTTP, Q_PRIVATE,),
}
for tag, values in check_values.items():
if tag not in required_tags and tag not in self.dkim_tags:
continue
# Handle some structured values
if tag == "q":
test = self.dkim_tags[tag].split(":")
else:
test = (self.dkim_tags[tag],)
for item in test:
if item not in values:
msg = "Tag: %s has incorrect value: %s" % (tag, self.dkim_tags[tag],)
log.debug("DKIM: {msg}", msg=msg)
raise DKIMVerificationError(msg)
# Check time stamp
if "t" in self.dkim_tags and self.time:
diff_time = self.time - int(self.dkim_tags["t"])
if diff_time < -360:
msg = "Signature time too far in the future: %d seconds" % (diff_time,)
log.debug("DKIM: {msg}", msg=msg)
raise DKIMVerificationError(msg)
# Check expiration
if "x" in self.dkim_tags and self.time:
diff_time = self.time - int(self.dkim_tags["x"])
if diff_time > 0:
msg = "Signature expired: %d seconds" % (diff_time,)
log.debug("DKIM: {msg}", msg=msg)
raise DKIMVerificationError(msg)
# Base64 encoded tags might include WSP which we need to ignore
for tag in ("b", "bh",):
self.dkim_tags["_%s" % (tag,)] = "".join(self.dkim_tags[tag].split())
# Some useful bits
self.hash_method = DKIMUtils.hashlib_method(self.dkim_tags["a"])
self.hash_func = DKIMUtils.hash_func(self.dkim_tags["a"])
self.key_methods = self.dkim_tags["q"].split(":")
def extractSignedHeaders(self):
"""
Extract the set of headers from the request that are supposed to be signed. Canonicalize them
and return the expected signed data.
"""
# Extract all the expected signed headers taking into account multiple occurrences of a header
# which get concatenated with a single comma in between.
header_list = [hdr.strip() for hdr in self.dkim_tags["h"].split(":")]
headers = []
for header in header_list:
actual_headers = self.headers.getRawHeaders(header)
if actual_headers:
headers.append((header, ",".join(actual_headers),))
# DKIM-Signature is always included at the end
headers.append((DKIM_SIGNATURE, self.headers.getRawHeaders(DKIM_SIGNATURE)[0],))
# Now canonicalize the values
return "".join([DKIMUtils.canonicalizeHeader(name, value, dkim_tags=self.dkim_tags) for name, value in headers])
@inlineCallbacks
def locatePublicKey(self):
"""
Try to lookup the public key matching the signature.
"""
for lookup in self.key_lookup_methods:
if lookup.method in self.key_methods or lookup.method == "*":
pubkey = (yield lookup(self.dkim_tags).getPublicKey())
if pubkey is not None:
returnValue(pubkey)
else:
returnValue(None)
class PublicKeyLookup(object):
"""
Abstract base class for public key lookup methods.
The L{method} attribute indicates the DKIM q= lookup method that the class will support, or if set to "*",
the class will handle any q= value.
"""
keyCache = {}
method = None
def __init__(self, dkim_tags):
self.dkim_tags = dkim_tags
@inlineCallbacks
def getPublicKey(self, useCache=True):
"""
Get key from cache or directly do query.
@param useCache: whether or not to use the cache
@type useCache: C{bool}
"""
key = self._getSelectorKey()
if key not in PublicKeyLookup.keyCache or not useCache:
pubkeys = (yield self._lookupKeys())
PublicKeyLookup.keyCache[key] = pubkeys
returnValue(self._selectKey())
def _getSelectorKey(self):
"""
Get a token used to uniquely identify the key being looked up. Token format will
depend on the lookup method.
"""
raise NotImplementedError
def _lookupKeys(self):
"""
Do the key lookup using the actual lookup method. Return a C{list} of C{dict}
that contains the key tag-list. Return a L{Deferred}.
"""
raise NotImplementedError
def _selectKey(self):
"""
Select a specific key from the list that best matches the DKIM-Signature tags
"""
pubkeys = PublicKeyLookup.keyCache.get(self._getSelectorKey(), [])
for pkey in pubkeys:
# Check validity
if pkey.get("v", "DKIM1") != "DKIM1":
continue
# Check key type
if pkey.get("k", "rsa") != "rsa":
continue
# Check valid hash algorithms
hashes = set([hash.strip() for hash in pkey.get("h", "sha1:sha256").split(":")])
if self.dkim_tags["a"][4:] not in hashes:
continue
# Service type
if pkey.get("s", KEY_SERVICE_TYPE) not in ("*", KEY_SERVICE_TYPE,):
continue
# Non-revoked key
if len(pkey.get("p", "")) == 0:
continue
return self._makeKey(pkey)
log.debug("DKIM: No valid public key: {sel} {keys}", sel=self._getSelectorKey(), keys=pubkeys)
return None
def _makeKey(self, pkey):
"""
Turn the key tag list into an actual RSA public key object
@param pkey: key tag list
@type pkey: C{list}
"""
key_data = """-----BEGIN PUBLIC KEY-----
%s
-----END PUBLIC KEY-----
""" % ("\n".join(textwrap.wrap(pkey["p"], 64)),)
try:
key = RSA.importKey(key_data)
key._original_data = key_data
return key
except:
log.debug("DKIM: Unable to make public key:\n{key}", key=key_data)
return None
@staticmethod
def flushCache():
PublicKeyLookup.keyCache = {}
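# Illustrative sketch (made-up values) of the parsed key record a lookup class
# is expected to return and that _selectKey() will accept: DKIM1 version, an
# "rsa" key, a hash list covering the signature algorithm, the "ischedule"
# service type, and a non-empty base64 public key in "p" (truncated here as a
# placeholder).
_EXAMPLE_KEY_RECORD = {
    "v": "DKIM1",
    "k": "rsa",
    "h": "sha1:sha256",
    "s": "ischedule",
    "p": "MIGfMA0G...",  # placeholder, not a real key
}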
class PublicKeyLookup_DNSTXT(PublicKeyLookup):
method = Q_DNS
def _getSelectorKey(self):
"""
Get a token used to uniquely identify the key being looked up. Token format will
depend on the lookup method.
"""
return "%s._domainkey.%s" % (self.dkim_tags["s"], self.dkim_tags["d"],)
@inlineCallbacks
def _lookupKeys(self):
"""
Do the key lookup using the actual lookup method.
"""
log.debug("DKIM: TXT lookup: {key}", key=self._getSelectorKey())
data = (yield lookupDataViaTXT(self._getSelectorKey()))
log.debug("DKIM: TXT lookup results: {key}\n{data}", key=self._getSelectorKey(), data="\n".join(data))
returnValue(tuple([DKIMUtils.extractTags(line) for line in data]))
class PublicKeyLookup_HTTP_WellKnown(PublicKeyLookup):
method = Q_HTTP
def _getSelectorKey(self):
"""
Get a token used to uniquely identify the key being looked up. Token format will
depend on the lookup method.
"""
host = ".".join(self.dkim_tags["d"].split(".")[-2:])
return "https://%s/.well-known/domainkey/%s/%s" % (host, self.dkim_tags["d"], self.dkim_tags["s"],)
@inlineCallbacks
def _getURI(self):
"""
Determine the well-known URI for the public key service.
"""
# First we do an SRV lookup for _domainkey to get the public key server host/port
result = (yield lookupServerViaSRV(self.dkim_tags["d"], service="_domainkey_lookup"))
if result is None:
log.debug("DKIM: SRV _domainkey failed on: {tag} trying domain directly", tag=self.dkim_tags["d"])
host = self.dkim_tags["d"]
port = ""
scheme = "https"
else:
host, port = result
scheme = "http" if port in (80, 8008, 8080,) else "https"
if port == 80 and scheme == "http" or port == 443 and scheme == "https":
port = ""
else:
port = ":%s" % (port,)
returnValue("%s://%s%s/.well-known/domainkey/%s/%s" % (scheme, host, port, self.dkim_tags["d"], self.dkim_tags["s"],))
@inlineCallbacks
def _lookupKeys(self):
"""
Do the key lookup using the actual lookup method.
"""
# First we do an SRV lookup for _domainkey to get the public key server URI
uri = (yield self._getURI())
log.debug("DKIM: HTTP/.well-known lookup: {uri}", uri=uri)
response = (yield getURL(uri))
if response is None or response.code / 100 != 2:
log.debug("DKIM: Failed http/well-known lookup: {uri} {resp}", uri=uri, resp=response)
returnValue(())
ct = response.headers.getRawHeaders("content-type", ("bogus/type",))[0]
ct = ct.split(";", 1)
ct = ct[0].strip()
if ct not in ("text/plain",):
log.debug("DKIM: Failed http/well-known lookup: wrong content-type returned {uri} {ct}", uri=uri, ct=ct)
returnValue(())
log.debug("DKIM: HTTP/.well-known lookup results: {uri}\n{resp}", uri=uri, resp=response.data)
returnValue(tuple([DKIMUtils.extractTags(line) for line in response.data.splitlines()]))
class PublicKeyLookup_PrivateExchange(PublicKeyLookup):
method = Q_PRIVATE
directory = None
def _getSelectorKey(self):
"""
Get a token used to uniquely identify the key being looked up. Token format will
depend on the lookup method.
"""
return "%s#%s" % (self.dkim_tags["d"], self.dkim_tags["s"],)
def _lookupKeys(self):
"""
Key information is stored in a file, one record per line.
"""
# Check validity of paths
if PublicKeyLookup_PrivateExchange.directory is None:
log.debug("DKIM: Failed private-exchange lookup: no directory configured")
return succeed(())
keyfile = os.path.join(PublicKeyLookup_PrivateExchange.directory, self._getSelectorKey())
if not os.path.exists(keyfile):
log.debug("DKIM: Failed private-exchange lookup: no path {path}", path=keyfile)
return succeed(())
# Now read the data
log.debug("DKIM: Private exchange lookup: {path}", path=keyfile)
try:
with open(keyfile) as f:
keys = f.read()
except IOError as e:
log.debug("DKIM: Failed private-exchange lookup: could not read {path} {ex}", path=keyfile, ex=e)
return succeed(())
log.debug("DKIM: Private exchange lookup results: {path}\n{keys}", path=keyfile, keys=keys)
return succeed(tuple([DKIMUtils.extractTags(line) for line in keys.splitlines()]))
class DomainKeyResource (SimpleResource):
"""
Domainkey well-known resource.
"""
def __init__(self, domain, selector, pubkeyfile):
"""
"""
assert domain
assert selector
SimpleResource.__init__(self, principalCollections=None, isdir=True, defaultACL=SimpleResource.allReadACL)
self.makeKeyData(domain, selector, pubkeyfile)
self.domain = domain
self.selector = selector
def makeKeyData(self, domain, selector, pubkeyfile):
"""
Check that a valid key exists, create the TXT record format data and make the needed child resources.
"""
# Get data from file
try:
with open(pubkeyfile) as f:
key_data = f.read()
except IOError as e:
log.error("DKIM: Unable to open the public key file: {path} because of {ex}", path=pubkeyfile, ex=e)
raise
# Make sure we can parse a valid public key
try:
RSA.importKey(key_data)
except:
log.error("DKIM: Invalid public key file: {path}", path=pubkeyfile)
raise
# Make the TXT record
key_data = "".join(key_data.strip().splitlines()[1:-1])
txt_data = "v=DKIM1; s=ischedule; p=%s\n" % (key_data,)
# Setup resource hierarchy
domainResource = SimpleResource(principalCollections=None, isdir=True, defaultACL=SimpleResource.allReadACL)
self.putChild(domain, domainResource)
selectorResource = SimpleDataResource(principalCollections=None, content_type=MimeType.fromString("text/plain"), data=txt_data, defaultACL=SimpleResource.allReadACL)
domainResource.putChild(selector, selectorResource)
def contentType(self):
return MimeType.fromString("text/html; charset=utf-8")
def render(self, request):
output = """<html>
<head>
<title>DomainKey Resource</title>
</head>
<body>
<h1>DomainKey Resource.</h1>
<a href="%s">Domain: %s<br>
Selector: %s</a>
</body>
</html>""" % (joinURL(request.uri, self.domain, self.selector), self.domain, self.selector,)
response = Response(200, {}, output)
response.headers.setHeader("content-type", MimeType("text", "html"))
return response
from falcon.http_error import HTTPError
from apiRequests.RequestCodes import Code, HTTP_809
class Error(Exception):
def __init__(self, code, message):
super(Error, self).__init__({'code': code, 'message': message})
# Mongodb's errors 1 - 999
class MongodbConnectionFail(Error):
_CODE = 1
_DEFAULT_MSG = 'Something went wrong trying to connect to MongoDB'
def __init__(self, message=_DEFAULT_MSG):
super(MongodbConnectionFail, self).__init__(self._CODE, message)
class MongodbInvalidDatabase(Error):
_CODE = 2
_DEFAULT_MSG = 'Invalid database name.'
def __init__(self, message=_DEFAULT_MSG):
super(MongodbInvalidDatabase, self).__init__(self._CODE, message)
class MongodbInvalidCollection(Error):
_CODE = 2
_DEFAULT_MSG = 'Invalid collection name.'
def __init__(self, message=_DEFAULT_MSG):
super(MongodbInvalidCollection, self).__init__(self._CODE, message)
class MongodbInvalidCredentials(Error):
_CODE = 3
_DEFAULT_MSG = 'The user or password is invalid.'
def __init__(self, message=_DEFAULT_MSG):
super(MongodbInvalidCredentials, self).__init__(self._CODE, message)
# Topology's errors 1000 - 1999
class TopologyInvalidId(Error):
_CODE = 1000
_DEFAULT_MSG = 'The topology\'s id must be defined.'
def __init__(self, message=_DEFAULT_MSG):
super(TopologyInvalidId, self).__init__(self._CODE, message)
# Topology's errors 1000 - 1999
class TopologyNotInCluster(Error):
_CODE = 1001
_DEFAULT_MSG = 'The topology is not in the cluster.'
def __init__(self, message=_DEFAULT_MSG):
super(TopologyNotInCluster, self).__init__(self._CODE, message)
# Module's errors 2000 - 2999
class ModuleException(Error):
_CODE = 2000
_DEFAULT_MSG = 'The module must be defined.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleException, self).__init__(self._CODE, message)
class ModuleVersionInvalidId(Error):
_CODE = 2001
_DEFAULT_MSG = 'The module\'s version code must be defined.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleVersionInvalidId, self).__init__(self._CODE, message)
class ModuleInvalidType(Error):
_CODE = 2002
_DEFAULT_MSG = 'Invalid module type. Allowed types are spout, bolt and drain.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleInvalidType, self).__init__(self._CODE, message)
class ModuleInvalidLanguage(Error):
_CODE = 2003
_DEFAULT_MSG = 'Invalid module language. Allowed languages are python and java.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleInvalidLanguage, self).__init__(self._CODE, message)
class ModuleErrorCompilingException(Error):
_CODE = 2004
def __init__(self, trace):
super(ModuleErrorCompilingException, self).__init__(self._CODE, 'Error compiling... Trace: ' + trace)
class ModuleWarningsCompilingException(Error):
_CODE = 2005
def __init__(self, trace):
super(ModuleWarningsCompilingException, self).__init__(self._CODE, 'Warnings compiling... Trace: ' + trace)
class ModuleInvalidVersion(Error):
_CODE = 2006
_DEFAULT_MSG = 'The module\'s version must be defined or the version could not be found.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleInvalidVersion, self).__init__(self._CODE, message)
class ModuleWriteException(Error):
_CODE = 2007
_DEFAULT_MSG = 'Something went wrong trying to write the module to a file.'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleWriteException, self).__init__(self._CODE, message)
class ModuleInvalidId(Error):
_CODE = 2008
_DEFAULT_MSG = 'The module\'s id is required'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleInvalidId, self).__init__(self._CODE, message)
class ModuleInvalidName(Error):
_CODE = 2009
_DEFAULT_MSG = 'The module\'s name is required'
def __init__(self, message=_DEFAULT_MSG):
super(ModuleInvalidName, self).__init__(self._CODE, message)
class GistHandlerUrlException(Error):
_CODE = 3000
_DEFAULT_MSG = 'The URL is not from gist.github.com'
def __init__(self, message=_DEFAULT_MSG):
super(GistHandlerUrlException, self).__init__(self._CODE, message)
class GistHandlerIdInvalid(Error):
_CODE = 3001
_DEFAULT_MSG = 'No such gist found'
def __init__(self, message=_DEFAULT_MSG):
super(GistHandlerIdInvalid, self).__init__(self._CODE, message)
class TemplateNotFound(Error):
_CODE = 4000
_DEFAULT_MSG = 'Template not found'
def __init__(self, message=_DEFAULT_MSG):
super(TemplateNotFound, self).__init__(self._CODE, message)
class PathException(Error):
_CODE = 4001
_DEFAULT_MSG = 'Path does not exist'
def __init__(self, message=_DEFAULT_MSG):
super(PathException, self).__init__(self._CODE, message)
class GeneratePomFileException(Error):
_CODE = 4002
_DEFAULT_MSG = 'Error trying to generate pom.xml'
def __init__(self, message=_DEFAULT_MSG):
super(GeneratePomFileException, self).__init__(self._CODE, message)
class GenerateMainClassException(Error):
_CODE = 4003
_DEFAULT_MSG = 'Error trying to generate App.java'
def __init__(self, message=_DEFAULT_MSG):
super(GenerateMainClassException, self).__init__(self._CODE, message)
class TopologyWriteException(Error):
_CODE = 5001
_DEFAULT_MSG = 'Something went wrong trying to write the topology to a file.'
def __init__(self, message=_DEFAULT_MSG):
super(TopologyWriteException, self).__init__(self._CODE, message)
class MissingMandatoryFields(Error):
_CODE = 6000
_DEFAULT_MSG = 'Missing mandatory fields in object from db'
def __init__(self, message=_DEFAULT_MSG):
super(MissingMandatoryFields, self).__init__(self._CODE, message)
'''
HTTP Sinfonier errors
'''
class HTTPErrorsSinfonier(HTTPError):
def __init__(self, status, code, message=None, params=None):
super(HTTPErrorsSinfonier, self).__init__(status=status, code=code)
self.message = message
self.params = params
def to_dict(self, obj_type=dict):
return Code(self.code, self.status, self.params, self.message)
class HTTPBadParams(HTTPErrorsSinfonier):
def __init__(self, msg, params=None):
super(HTTPBadParams, self).__init__(HTTP_809, 809, msg, params)
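# A minimal usage sketch (hypothetical call site, not part of the Sinfonier
# API): domain code raises the Error subclasses defined above, and an API
# layer converts them into an HTTPErrorsSinfonier subclass so Falcon can
# serialize the failure.
def _error_handling_example(name):
    """Illustrative only."""
    try:
        if not name:
            raise ModuleInvalidName()
    except ModuleInvalidName as e:
        # Error.__init__ packs {'code': ..., 'message': ...} into e.args[0]
        raise HTTPBadParams(e.args[0]['message'], params={'name': name})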
from __future__ import absolute_import, print_function
import collections
import logging
import six
from django.conf import settings
from django.db import transaction
from django.utils.encoding import force_text
from sentry.utils import json
from sentry.utils.strings import truncatechars
from sentry.utils.compat import filter
def safe_execute(func, *args, **kwargs):
# TODO: we should make smart savepoints (only executing the savepoint server
# side if we execute a query)
_with_transaction = kwargs.pop("_with_transaction", True)
expected_errors = kwargs.pop("expected_errors", None)
try:
if _with_transaction:
with transaction.atomic():
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
except Exception as e:
if hasattr(func, "im_class"):
cls = func.im_class
else:
cls = func.__class__
func_name = getattr(func, "__name__", six.text_type(func))
cls_name = cls.__name__
logger = logging.getLogger("sentry.safe.%s" % (cls_name.lower(),))
if expected_errors and isinstance(e, expected_errors):
logger.info("%s.process_error_ignored", func_name, extra={"exception": e})
return
logger.error("%s.process_error", func_name, exc_info=True, extra={"exception": e})
else:
return result
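# A minimal usage sketch for safe_execute() (hypothetical callable): any
# exception raised by the callable is logged under a "sentry.safe.*" logger
# and swallowed, and None is returned instead of a result.
# _with_transaction=False is used here so the sketch does not need a database
# transaction.
def _safe_execute_example():
    """Illustrative only."""
    def risky(x):
        return 100 / x

    ok = safe_execute(risky, 4, _with_transaction=False)      # -> 25
    failed = safe_execute(risky, 0, _with_transaction=False)  # logged, -> None
    return ok, failed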
def trim(
value,
max_size=settings.SENTRY_MAX_VARIABLE_SIZE,
max_depth=6,
object_hook=None,
_depth=0,
_size=0,
**kwargs
):
"""
Truncates a value to ``max_size`` (default ``settings.SENTRY_MAX_VARIABLE_SIZE``).
The method of truncation depends on the type of value.
"""
options = {
"max_depth": max_depth,
"max_size": max_size,
"object_hook": object_hook,
"_depth": _depth + 1,
}
if _depth > max_depth:
if not isinstance(value, six.string_types):
value = json.dumps(value)
return trim(value, _size=_size, max_size=max_size)
elif isinstance(value, dict):
result = {}
_size += 2
for k in sorted(value.keys(), key=lambda x: (len(force_text(value[x])), x)):
v = value[k]
trim_v = trim(v, _size=_size, **options)
result[k] = trim_v
_size += len(force_text(trim_v)) + 1
if _size >= max_size:
break
elif isinstance(value, (list, tuple)):
result = []
_size += 2
for v in value:
trim_v = trim(v, _size=_size, **options)
result.append(trim_v)
_size += len(force_text(trim_v))
if _size >= max_size:
break
if isinstance(value, tuple):
result = tuple(result)
elif isinstance(value, six.string_types):
result = truncatechars(value, max_size - _size)
else:
result = value
if object_hook is None:
return result
return object_hook(result)
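# A small illustrative sketch of trim() with made-up values: long strings are
# truncated to roughly max_size characters, and anything nested deeper than
# max_depth is serialized to JSON before being truncated as a string.
def _trim_example():
    """Illustrative only."""
    shallow = trim("x" * 10000, max_size=50)   # truncated to ~50 characters
    deep = trim({"a": {"b": {"c": {"d": "value"}}}}, max_depth=2, max_size=512)
    # -> {"a": {"b": {"c": '{"d": "value"}'}}}
    return shallow, deep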
def trim_pairs(iterable, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
result = []
for idx, item in enumerate(iterable):
key, value = item
result.append((key, trim(value, **kwargs)))
if idx > max_items:
return result
return result
def trim_dict(value, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
for idx, key in enumerate(list(iter(value))):
value[key] = trim(value[key], **kwargs)
if idx > max_items:
del value[key]
return value
def get_path(data, *path, **kwargs):
"""
Safely resolves data from a recursive data structure. A value is only
returned if the full path exists, otherwise ``None`` is returned.
If the ``default`` argument is specified, it is returned instead of ``None``.
If the ``filter`` argument is specified and the value is a list, it is
filtered with the given callback. Alternatively, pass ``True`` as filter to
only filter ``None`` values.
"""
default = kwargs.pop("default", None)
f = kwargs.pop("filter", None)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path:
if isinstance(data, collections.Mapping) and p in data:
data = data[p]
elif isinstance(data, (list, tuple)) and isinstance(p, int) and -len(data) <= p < len(data):
data = data[p]
else:
return default
if f and data and isinstance(data, (list, tuple)):
data = filter((lambda x: x is not None) if f is True else f, data)
return data if data is not None else default
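# Minimal usage sketch for get_path() on a made-up event payload: a value is
# returned only when the full path resolves, otherwise the default, and
# filter=True drops None entries from a resolved list.
def _get_path_example():
    """Illustrative only."""
    data = {"exception": {"values": [None, {"type": "ValueError"}]}}
    typ = get_path(data, "exception", "values", 1, "type")       # -> "ValueError"
    missing = get_path(data, "exception", "frames", default=[])  # -> []
    values = get_path(data, "exception", "values", filter=True)  # -> [{"type": "ValueError"}]
    return typ, missing, values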
def set_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument. `None` is treated like a missing value. If a non-mapping item is
encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.__setitem__. Returns True if
the value was set, otherwise False.
If the ``overwrite`` kwarg is set to False, the value is only set if there is
no existing value or it is None. See ``setdefault_path``.
"""
try:
value = kwargs.pop("value")
except KeyError:
raise TypeError("set_path() requires a 'value' keyword argument")
overwrite = kwargs.pop("overwrite", True)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path[:-1]:
if not isinstance(data, collections.Mapping):
return False
if data.get(p) is None:
data[p] = {}
data = data[p]
if not isinstance(data, collections.Mapping):
return False
p = path[-1]
if overwrite or data.get(p) is None:
data[p] = value
return True
return False
def setdefault_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument if it does not exist. `None` is treated like a missing value. If a
non-mapping item is encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.setdefault, except for None
values. Returns True if the value was set, otherwise False.
"""
kwargs["overwrite"] = False
return set_path(data, *path, **kwargs)
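# Minimal usage sketch for set_path()/setdefault_path() on a made-up dict:
# intermediate mappings are created as needed, and setdefault_path() only
# writes when the existing value is missing or None.
def _set_path_example():
    """Illustrative only."""
    data = {"tags": None}
    set_path(data, "user", "id", value=42)         # data["user"]["id"] == 42
    setdefault_path(data, "user", "id", value=0)   # keeps 42
    setdefault_path(data, "tags", value=["prod"])  # replaces the None
    return data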
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import configparser
import functools
import logging
import os
import os.path
import unicodedata
from typing import Any, Callable, DefaultDict, Dict, Optional, Sequence, Union
from urllib.parse import urlparse
from urllib.parse import urlunparse
import requests
import rfc3986
from twine import exceptions
# Shim for input to allow testing.
input_func = input
DEFAULT_REPOSITORY = "https://upload.pypi.org/legacy/"
TEST_REPOSITORY = "https://test.pypi.org/legacy/"
DEFAULT_CONFIG_FILE = "~/.pypirc"
# TODO: In general, it seems to be assumed that the values retrieved from
# instances of this type aren't None, except for username and password.
# Type annotations would be cleaner if this were Dict[str, str], but that
# requires reworking the username/password handling, probably starting with
# get_userpass_value.
RepositoryConfig = Dict[str, Optional[str]]
logger = logging.getLogger(__name__)
def get_config(path: str) -> Dict[str, RepositoryConfig]:
"""Read repository configuration from a file (i.e. ~/.pypirc).
Format: https://packaging.python.org/specifications/pypirc/
If the default config file doesn't exist, return a default configuration for
    pypi and testpypi.
"""
realpath = os.path.realpath(os.path.expanduser(path))
parser = configparser.RawConfigParser()
try:
with open(realpath) as f:
parser.read_file(f)
logger.info(f"Using configuration from {realpath}")
except FileNotFoundError:
# User probably set --config-file, but the file can't be read
if path != DEFAULT_CONFIG_FILE:
raise
# server-login is obsolete, but retained for backwards compatibility
defaults: RepositoryConfig = {
"username": parser.get("server-login", "username", fallback=None),
"password": parser.get("server-login", "password", fallback=None),
}
config: DefaultDict[str, RepositoryConfig]
config = collections.defaultdict(lambda: defaults.copy())
index_servers = parser.get(
"distutils", "index-servers", fallback="pypi testpypi"
).split()
# Don't require users to manually configure URLs for these repositories
config["pypi"]["repository"] = DEFAULT_REPOSITORY
if "testpypi" in index_servers:
config["testpypi"]["repository"] = TEST_REPOSITORY
# Optional configuration values for individual repositories
for repository in index_servers:
for key in [
"username",
"repository",
"password",
"ca_cert",
"client_cert",
]:
if parser.has_option(repository, key):
config[repository][key] = parser.get(repository, key)
# Convert the defaultdict to a regular dict to prevent surprising behavior later on
return dict(config)
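# For reference, a minimal ~/.pypirc accepted by the parser above might look like the
# following (illustrative only; the credentials are placeholders):
#
#   [distutils]
#   index-servers =
#       pypi
#       testpypi
#
#   [pypi]
#   username = __token__
#   password = <an API token>
#
#   [testpypi]
#   username = __token__
#   password = <an API token>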
def _validate_repository_url(repository_url: str) -> None:
"""Validate the given url for allowed schemes and components."""
# Allowed schemes are http and https, based on whether the repository
# supports TLS or not, and scheme and host must be present in the URL
validator = (
rfc3986.validators.Validator()
.allow_schemes("http", "https")
.require_presence_of("scheme", "host")
)
try:
validator.validate(rfc3986.uri_reference(repository_url))
except rfc3986.exceptions.RFC3986Exception as exc:
raise exceptions.UnreachableRepositoryURLDetected(
f"Invalid repository URL: {exc.args[0]}."
)
def get_repository_from_config(
config_file: str,
repository: str,
repository_url: Optional[str] = None,
) -> RepositoryConfig:
"""Get repository config command-line values or the .pypirc file."""
# Prefer CLI `repository_url` over `repository` or .pypirc
if repository_url:
_validate_repository_url(repository_url)
return {
"repository": repository_url,
"username": None,
"password": None,
}
try:
return get_config(config_file)[repository]
except OSError as exc:
raise exceptions.InvalidConfiguration(str(exc))
except KeyError:
raise exceptions.InvalidConfiguration(
f"Missing '{repository}' section from {config_file}.\n"
f"More info: https://packaging.python.org/specifications/pypirc/ "
)
_HOSTNAMES = {
"pypi.python.org",
"testpypi.python.org",
"upload.pypi.org",
"test.pypi.org",
}
def normalize_repository_url(url: str) -> str:
parsed = urlparse(url)
if parsed.netloc in _HOSTNAMES:
return urlunparse(("https",) + parsed[1:])
return urlunparse(parsed)
def get_file_size(filename: str) -> str:
"""Return the size of a file in KB, or MB if >= 1024 KB."""
file_size = os.path.getsize(filename) / 1024
size_unit = "KB"
if file_size > 1024:
file_size = file_size / 1024
size_unit = "MB"
return f"{file_size:.1f} {size_unit}"
def check_status_code(response: requests.Response, verbose: bool) -> None:
"""Generate a helpful message based on the response from the repository.
Raise a custom exception for recognized errors. Otherwise, print the
response content (based on the verbose option) before re-raising the
HTTPError.
"""
if response.status_code == 410 and "pypi.python.org" in response.url:
raise exceptions.UploadToDeprecatedPyPIDetected(
f"It appears you're uploading to pypi.python.org (or "
f"testpypi.python.org). You've received a 410 error response. "
f"Uploading to those sites is deprecated. The new sites are "
f"pypi.org and test.pypi.org. Try using {DEFAULT_REPOSITORY} (or "
f"{TEST_REPOSITORY}) to upload your packages instead. These are "
f"the default URLs for Twine now. More at "
f"https://packaging.python.org/guides/migrating-to-pypi-org/."
)
elif response.status_code == 405 and "pypi.org" in response.url:
raise exceptions.InvalidPyPIUploadURL(
f"It appears you're trying to upload to pypi.org but have an "
f"invalid URL. You probably want one of these two URLs: "
f"{DEFAULT_REPOSITORY} or {TEST_REPOSITORY}. Check your "
f"--repository-url value."
)
try:
response.raise_for_status()
except requests.HTTPError as err:
if not verbose:
logger.warning(
"Error during upload. "
"Retry with the --verbose option for more details."
)
raise err
def get_userpass_value(
cli_value: Optional[str],
config: RepositoryConfig,
key: str,
prompt_strategy: Optional[Callable[[], str]] = None,
) -> Optional[str]:
"""Get a credential (e.g. a username or password) from the configuration.
Uses the following rules:
1. If ``cli_value`` is specified, use that.
2. If ``config[key]`` is specified, use that.
3. If ``prompt_strategy`` is specified, use its return value.
4. Otherwise return ``None``
:param cli_value:
The value supplied from the command line.
:param config:
A dictionary of repository configuration values.
:param key:
The credential to look up in ``config``, e.g. ``"username"`` or ``"password"``.
:param prompt_strategy:
An argumentless function to get the value, e.g. from keyring or by prompting
the user.
:return:
The credential value, i.e. the username or password.
"""
if cli_value is not None:
logger.info(f"{key} set by command options")
return cli_value
elif config.get(key) is not None:
logger.info(f"{key} set from config file")
return config[key]
elif prompt_strategy:
warning = ""
value = prompt_strategy()
if not value:
warning = f"Your {key} is empty"
elif any(unicodedata.category(c).startswith("C") for c in value):
# See https://www.unicode.org/reports/tr44/#General_Category_Values
# Most common case is "\x16" when pasting in Windows Command Prompt
warning = f"Your {key} contains control characters"
if warning:
logger.warning(f"{warning}. Did you enter it correctly?")
logger.warning(
"See https://twine.readthedocs.io/#entering-credentials "
"for more information."
)
return value
else:
return None
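# Hypothetical example of the resolution order implemented above: the CLI value wins,
# then the config file, then the prompt (the names below are illustrative only):
#
#   username = get_userpass_value(
#       cli_value=None,
#       config={"username": None, "password": None},
#       key="username",
#       prompt_strategy=lambda: input_func("Enter your username: "),
#   )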
#: Get the CA bundle via :func:`get_userpass_value`.
get_cacert = functools.partial(get_userpass_value, key="ca_cert")
#: Get the client certificate via :func:`get_userpass_value`.
get_clientcert = functools.partial(get_userpass_value, key="client_cert")
class EnvironmentDefault(argparse.Action):
"""Get values from environment variable."""
def __init__(
self,
env: str,
required: bool = True,
default: Optional[str] = None,
**kwargs: Any,
) -> None:
default = os.environ.get(env, default)
self.env = env
if default:
required = False
super().__init__(default=default, required=required, **kwargs)
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None,
) -> None:
setattr(namespace, self.dest, values)
class EnvironmentFlag(argparse.Action):
"""Set boolean flag from environment variable."""
def __init__(self, env: str, **kwargs: Any) -> None:
default = self.bool_from_env(os.environ.get(env))
self.env = env
super().__init__(default=default, nargs=0, **kwargs)
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None,
) -> None:
setattr(namespace, self.dest, True)
@staticmethod
def bool_from_env(val: Optional[str]) -> bool:
"""Allow '0' and 'false' and 'no' to be False."""
falsey = {"0", "false", "no"}
return bool(val and val.lower() not in falsey)
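# Sketch of how these actions could be wired into an argparse parser; the option and
# environment-variable names below are illustrative, not taken from this module:
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--username", action=EnvironmentDefault, env="TWINE_USERNAME")
#   parser.add_argument("--non-interactive", action=EnvironmentFlag,
#                       env="TWINE_NON_INTERACTIVE",
#                       help="Do not interactively prompt for credentials.")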
|
|
import time
import socket
import gevent
import numpy as np
import sys
import cv2
from mabopy.config.load_config import LoadConfig
import cvlib
conf = LoadConfig("config.toml").config
"""
def match():
img = cv2.imread("box_in_scene2.png")#sys.argv[1])
temp = cv2.imread("box4.png")#sys.argv[2])
try:
dist = int(sys.argv[3])
except IndexError:
dist = 200
try:
num = int(sys.argv[4])
except IndexError:
num = -1
skp, tkp = findKeyPoints(img, temp, dist)
newimg = drawKeyPoints(img, temp, skp, tkp, num)
cv2.imshow("image", newimg)
cv2.waitKey(0)
"""
def supress(v, w):
    """Return True for circles whose radius lies between 20 px and half the crop width."""
    print v
    if 20 < v[2] < w / 2:
        return True
    return False
def main():
print conf
target = cv2.imread(conf["app"]["target"])#sys.argv[2])
#target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
#print type(target)
#cv2.NamedWindow("camera", 1)
#capture = cv2.VideoCapture(0)
capture = cv2.VideoCapture(conf["app"]["camera_uri"])
i = 0
pt1 = (conf["app"]["crop_start"][0],conf["app"]["crop_start"][1])
w = conf["app"]["corp_width"]
pt2 = (pt1[0]+w,pt1[1]+w)
debug = 1# conf["app"]["debug"]
cp = [0,0]
while True:
#i = i +1
#if i > 200:
# i = 0
        ret, img_read = capture.read()  # cv.QueryFrame(capture)
        if not ret:
            raise Exception("can't connect camera")
#mat=cv2.GetMat(img)
#img_p = np.asarray(mat)
#img_p = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
#print dir(img)
"""
im_gray = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
cv.CvtColor(img,im_gray,cv.CV_RGB2GRAY)
# Sobel operator
dstSobel = cv.CreateMat(im_gray.height, im_gray.width, cv.CV_32FC1)
# Sobel(src, dst, xorder, yorder, apertureSize = 3)
cv.Sobel(im_gray,dstSobel,1,1,3)
"""
#print ret
try:
# skp: source key points, tkp: target key points
t1 = time.time()
#img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
#im[y1:y2, x1:x2]
#
crop_img = img_read[pt1[1]:pt2[1], pt1[0]:pt2[0]]
#print(len(crop_img))
distance = conf["app"]["distance"]
skp, tkp = cvlib.findKeyPoints(crop_img , target, distance)
            if skp is None:
print("skp is none")
img_read = cv2.medianBlur(img_read,5)
img_read = cv2.cvtColor(img_read, cv2.COLOR_BGR2GRAY)
cv2.imshow("camera", img_read)
#continue
else:
print "==" * 20
print "time:[%.3f]" %(time.time() - t1)
print "skp", len(skp)#, skp
print "tkp",len(tkp)#, tkp
if debug:
crop_img = cv2.medianBlur(crop_img,5)
gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT,
45, ## dp
80, ## minDist
param1=140,
param2=118, ##
minRadius=30,
maxRadius=0)
print circles
circles = np.uint16(np.around(circles))
j = 0
cv2.rectangle(img_read, pt1, pt2, (0,255,0))
for i in circles[0,:]:
if supress(i, w):
j = j + 1
"""if i[0] - cp[0] > 30 or i[1] - cp[1] > 30 :
pass
else:
"""
cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),i[2],(0,255,0),2)
cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),2,(0,0,255),3)
cp = [ i[0], i[1] ]
#newimg = cvlib.drawKeyPoints(img_read, target, skp, tkp, pt1, pt2, -1)
cv2.imshow("camera", img_read)
#gevent.sleep(1)
except Exception as ex:
print(ex)
#gevent.sleep(3)
continue
#cv.ShowImage('camera', newimg)
# image smoothing and subtraction
# imageBlur = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # filering the original image
# # Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)
# cv.Smooth(im_gray, imageBlur, cv.CV_BLUR, 11, 11)
# diff = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # subtraction (original - filtered)
# cv.AbsDiff(im_gray,imageBlur,diff)
# cv.ShowImage('camera', diff)
if cv2.waitKey(10) == 27:
break
#gevent.sleep(0.1)
# cv2.destroyWindow("camera")
if __name__ == "__main__":
main()
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The modelgen module provides routines for specifying models for individual
subject analysis.
These include:
  * SpecifyModel: allows specification of sparse and non-sparse models
"""
import os
from copy import deepcopy
import numpy as np
from scipy.signal import convolve
from scipy.special import gammaln
#from scipy.stats.distributions import gamma
from nipype.externals.pynifti import load
from nipype.interfaces.base import BaseInterface, TraitedSpec,\
InputMultiPath, traits, File
from nipype.utils.misc import isdefined
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.spm import scans_for_fnames
class SpecifyModelInputSpec(TraitedSpec):
subject_id = traits.Either(traits.Str(),traits.Int(),mandatory=True,
desc ="Subject identifier used as a parameter to the subject_info_func.")
subject_info = traits.List(mandatory=True,
desc= "List subject specific condition information")
""" . If all
subjects had the same stimulus presentation schedule,
then this function can return the same structure
independent of the subject. This function must return a
list of dicts with the list length equal to the number of
sessions. The dicts should contain the following
information.
conditions : list of names
onsets : lists of onsets corresponding to each
condition
durations : lists of durations corresponding to each
condition. Should be left to a single 0 if all
events are being modelled as impulses.
amplitudes : lists of amplitudes for each event. This
is ignored by SPM
tmod : lists of conditions that should be temporally
modulated. Should default to None if not being used.
pmod : list of dicts corresponding to conditions
name : name of parametric modulator
param : values of the modulator
poly : degree of modulation
regressors : list of dicts or matfile
names : list of names corresponding to each
column. Should be None if automatically
assigned.
values : lists of values for each regressor
kernel : list of convolution kernel
"""
realignment_parameters = InputMultiPath(File(exists=True),
desc = "Realignment parameters returned by motion correction algorithm",
filecopy=False)
outlier_files = InputMultiPath(File(exists=True),
desc="Files containing scan outlier indices that should be tossed",
filecopy=False)
functional_runs = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
mandatory=True,
desc="Data files for model. List of 4D files or list of" \
"list of 3D files per session",
filecopy=False)
input_units = traits.Enum('secs', 'scans', mandatory=True,
desc = "Units of event onsets and durations (secs or scans)")
output_units = traits.Enum('secs', 'scans', mandatory=True,
desc = "Units of design event onsets and durations " \
"(secs or scans)")
high_pass_filter_cutoff = traits.Float(desc = \
"High-pass filter cutoff in secs")
concatenate_runs = traits.Bool(False, usedefault=True,
desc="Concatenating all runs to look like a single session.")
time_repetition = traits.Float(mandatory=True,
desc = "Time between the start of one volume to the start of " \
"the next image volume.")
# Not implemented yet
#polynomial_order = traits.Range(-1, low=-1,
# desc ="Number of polynomial functions to model high pass filter.")
#generate_design = traits.Bool(False, usedefault=True,
# desc="Generate a design matrix")
#Sparse and clustered-sparse specific options
is_sparse = traits.Bool(False, usedefault = True,
desc="indicates whether paradigm is sparse")
time_acquisition = traits.Float(0,
desc = "Time in seconds to acquire a single image volume")
volumes_in_cluster = traits.Range(low=0,
desc="Number of scan volumes in a cluster")
model_hrf = traits.Bool(desc="model sparse events with hrf")
stimuli_as_impulses = traits.Bool(True,
desc = "Treat each stimulus to be impulse like.",
usedefault=True)
scan_onset = traits.Float(0.0,
desc="Start of scanning relative to onset of run in secs",
usedefault=True)
class SpecifyModelOutputSpec(TraitedSpec):
session_info = File(exists=True,
desc="session info saved in a numpy file for level1designs")
#design_file = File(desc="design file")
class SpecifyModel(BaseInterface):
"""Makes a model specification
Parameters
----------
inputs : dict
key, value pairs that will update the SpecifyModel.inputs
attributes. See self.inputs_help() for a list attributes.
Attributes
----------
inputs : :class:`nipype.interfaces.base.Bunch`
Options that can be passed to spm_spm via a job structure
cmdline : str
String used to call matlab/spm via SpmMatlabCommandLine
interface
Other Parameters
----------------
To see optional arguments SpecifyModel().inputs_help()
"""
input_spec = SpecifyModelInputSpec
output_spec = SpecifyModelOutputSpec
def _scaletimings(self,timelist,input_units=None,output_units=None):
if input_units is None:
input_units = self.inputs.input_units
if output_units is None:
output_units = self.inputs.output_units
if input_units==output_units:
self._scalefactor = 1.
if (input_units == 'scans') and (output_units == 'secs'):
if isdefined(self.inputs.volumes_in_cluster) and (self.inputs.volumes_in_cluster > 1):
raise NotImplementedError("cannot scale timings if times are scans and acquisition is clustered")
else:
self._scalefactor = self.inputs.time_repetition
if (input_units == 'secs') and (output_units == 'scans'):
self._scalefactor = 1./self.inputs.time_repetition
#if self._scalefactor > 1:
timelist = [np.max([0.,self._scalefactor*t]) for t in timelist]
#else:
# timelist = [round(self._scalefactor*t) for t in timelist]
return timelist
def _gcd(self,a,b):
"""Returns the greates common divisor of two integers
uses Euclid's algorithm
"""
while b > 0: a,b = b, a%b
return a
def _spm_hrf(self,RT,P=[],fMRI_T=16):
""" python implementation of spm_hrf
see spm_hrf for implementation details
% RT - scan repeat time
% p - parameters of the response function (two gamma
% functions)
% defaults (seconds)
% p(0) - delay of response (relative to onset) 6
% p(1) - delay of undershoot (relative to onset) 16
% p(2) - dispersion of response 1
% p(3) - dispersion of undershoot 1
% p(4) - ratio of response to undershoot 6
% p(5) - onset (seconds) 0
% p(6) - length of kernel (seconds) 32
%
% hrf - hemodynamic response function
% p - parameters of the response function
>>> import nipype.algorithms.modelgen as model
>>> print model.SpecifyModel()._spm_hrf(2)
[ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01
2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02
-3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02
-5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04
-1.46257507e-04]
"""
p = np.array([6,16,1,1,6,0,32],dtype=float)
if len(P)>0:
p[0:len(P)] = P
_spm_Gpdf = lambda x,h,l: np.exp(h*np.log(l)+(h-1)*np.log(x)-(l*x)-gammaln(h))
# modelled hemodynamic response function - {mixture of Gammas}
dt = RT/float(fMRI_T)
u = np.arange(0,int(p[6]/dt+1)) - p[5]/dt
# the following code using scipy.stats.distributions.gamma
# doesn't return the same result as the spm_Gpdf function
# hrf = gamma.pdf(u,p[0]/p[2],scale=dt/p[2]) - gamma.pdf(u,p[1]/p[3],scale=dt/p[3])/p[4]
hrf = _spm_Gpdf(u,p[0]/p[2],dt/p[2]) - _spm_Gpdf(u,p[1]/p[3],dt/p[3])/p[4]
idx = np.arange(0,int((p[6]/RT)+1))*fMRI_T
hrf = hrf[idx]
hrf = hrf/np.sum(hrf)
return hrf
def _gen_regress(self,i_onsets,i_durations,i_amplitudes,nscans,bplot=False):
"""Generates a regressor for a sparse/clustered-sparse acquisition
see Ghosh et al. (2009) OHBM 2009
"""
if bplot:
import matplotlib.pyplot as plt
TR = np.round(self.inputs.time_repetition*1000) # in ms
if self.inputs.time_acquisition:
TA = np.round(self.inputs.time_acquisition*1000) # in ms
else:
TA = TR # in ms
nvol = self.inputs.volumes_in_cluster
SCANONSET = np.round(self.inputs.scan_onset*1000)
total_time = TR*(nscans-nvol)/nvol + TA*nvol + SCANONSET
SILENCE = TR-TA*nvol
dt = TA/10.;
durations = np.round(np.array(i_durations)*1000)
if len(durations) == 1:
durations = durations*np.ones((len(i_onsets)))
onsets = np.round(np.array(i_onsets)*1000)
dttemp = self._gcd(TA,self._gcd(SILENCE,TR))
if dt < dttemp:
if dttemp % dt != 0:
dt = self._gcd(dttemp,dt)
if dt < 1:
raise Exception("Time multiple less than 1 ms")
print "Setting dt = %d ms\n" % dt
npts = int(total_time/dt)
times = np.arange(0,total_time,dt)*1e-3
timeline = np.zeros((npts))
timeline2 = np.zeros((npts))
hrf = self._spm_hrf(dt*1e-3)
for i,t in enumerate(onsets):
idx = int(t/dt)
if i_amplitudes:
if len(i_amplitudes)>1:
timeline2[idx] = i_amplitudes[i]
else:
timeline2[idx] = i_amplitudes[0]
else:
timeline2[idx] = 1
if bplot:
plt.subplot(4,1,1)
plt.plot(times,timeline2)
if not self.inputs.stimuli_as_impulses:
if durations[i] == 0:
durations[i] = TA*nvol
stimdur = np.ones((int(durations[i]/dt)))
timeline2 = convolve(timeline2,stimdur)[0:len(timeline2)]
timeline += timeline2
timeline2[:] = 0
if bplot:
plt.subplot(4,1,2)
plt.plot(times,timeline)
if self.inputs.model_hrf:
timeline = convolve(timeline,hrf)[0:len(timeline)]
if bplot:
plt.subplot(4,1,3)
plt.plot(times,timeline)
# sample timeline
timeline2 = np.zeros((npts))
reg = []
for i,trial in enumerate(np.arange(nscans)/nvol):
scanstart = int((SCANONSET + trial*TR + (i%nvol)*TA)/dt)
#print total_time/dt, SCANONSET, TR, TA, scanstart, trial, i%2, int(TA/dt)
scanidx = scanstart+np.arange(int(TA/dt))
timeline2[scanidx] = np.max(timeline)
reg.insert(i,np.mean(timeline[scanidx]))
if bplot:
plt.subplot(4,1,3)
plt.plot(times,timeline2)
plt.subplot(4,1,4)
plt.bar(np.arange(len(reg)),reg,width=0.5)
return reg
def _cond_to_regress(self,info,nscans):
"""Converts condition information to full regressors
"""
reg = []
regnames = info.conditions
for i,c in enumerate(info.conditions):
if info.amplitudes:
amplitudes = info.amplitudes[i]
else:
amplitudes = None
reg.insert(i,self._gen_regress(self._scaletimings(info.onsets[i],output_units='secs'),
self._scaletimings(info.durations[i],output_units='secs'),
amplitudes,
nscans))
# need to deal with temporal and parametric modulators
# for sparse-clustered acquisitions enter T1-effect regressors
nvol = self.inputs.volumes_in_cluster
if nvol > 1:
for i in range(nvol-1):
treg = np.zeros((nscans/nvol,nvol))
treg[:,i] = 1
reg.insert(len(reg),treg.ravel().tolist())
return reg,regnames
def _generate_clustered_design(self,infolist):
"""Generates condition information for sparse-clustered
designs.
"""
infoout = deepcopy(infolist)
for i,info in enumerate(infolist):
infoout[i].conditions = None
infoout[i].onsets = None
infoout[i].durations = None
if info.conditions:
img = load(self.inputs.functional_runs[i])
nscans = img.get_shape()[3]
reg,regnames = self._cond_to_regress(info,nscans)
if not infoout[i].regressors:
infoout[i].regressors = []
infoout[i].regressor_names = []
else:
if not infoout[i].regressor_names:
infoout[i].regressor_names = ['R%d'%j for j in range(len(infoout[i].regressors))]
for j,r in enumerate(reg):
regidx = len(infoout[i].regressors)
infoout[i].regressor_names.insert(regidx,regnames[j])
infoout[i].regressors.insert(regidx,r)
return infoout
def _generate_standard_design(self,infolist,
functional_runs=None,
realignment_parameters=None,
outliers=None):
""" Generates a standard design matrix paradigm
"""
sessinfo = []
# dt = np.dtype({'names':['name', 'param', 'poly'],
# 'formats':[object, object, object]})
# sessinfo[i]['pmod'] = np.zeros((len(info.pmod),), dtype=dt)
for i,info in enumerate(infolist):
sessinfo.insert(i,dict(cond=[]))
if self.inputs.high_pass_filter_cutoff:
sessinfo[i]['hpf'] = np.float(self.inputs.high_pass_filter_cutoff)
if info.conditions:
for cid,cond in enumerate(info.conditions):
sessinfo[i]['cond'].insert(cid,dict())
sessinfo[i]['cond'][cid]['name'] = info.conditions[cid]
sessinfo[i]['cond'][cid]['onset'] = self._scaletimings(info.onsets[cid])
sessinfo[i]['cond'][cid]['duration'] = self._scaletimings(info.durations[cid])
if info.tmod and len(info.tmod)>cid:
sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid]
if info.pmod and len(info.pmod)>cid:
if info.pmod[cid]:
sessinfo[i]['cond'][cid]['pmod'] = []
for j,name in enumerate(info.pmod[cid].name):
sessinfo[i]['cond'][cid]['pmod'].insert(j,{})
sessinfo[i]['cond'][cid]['pmod'][j]['name'] = name
sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = info.pmod[cid].poly[j]
sessinfo[i]['cond'][cid]['pmod'][j]['param'] = info.pmod[cid].param[j]
sessinfo[i]['regress']= []
if info.regressors is not None:
for j,r in enumerate(info.regressors):
sessinfo[i]['regress'].insert(j,dict(name='',val=[]))
if info.regressor_names is not None:
sessinfo[i]['regress'][j]['name'] = info.regressor_names[j]
else:
sessinfo[i]['regress'][j]['name'] = 'UR%d'%(j+1)
sessinfo[i]['regress'][j]['val'] = info.regressors[j]
if isdefined(functional_runs):
sessinfo[i]['scans'] = scans_for_fnames(filename_to_list(functional_runs[i]),keep4d=False)
else:
raise Exception("No functional data information provided for model")
if isdefined(realignment_parameters):
for i,rp in enumerate(realignment_parameters):
mc = realignment_parameters[i]
for col in range(mc.shape[1]):
colidx = len(sessinfo[i]['regress'])
sessinfo[i]['regress'].insert(colidx,dict(name='',val=[]))
sessinfo[i]['regress'][colidx]['name'] = 'Realign%d'%(col+1)
sessinfo[i]['regress'][colidx]['val'] = mc[:,col].tolist()
if isdefined(outliers):
for i,out in enumerate(outliers):
numscans = len(sessinfo[i]['scans'])
print numscans
for j,scanno in enumerate(out):
if True:
colidx = len(sessinfo[i]['regress'])
sessinfo[i]['regress'].insert(colidx,dict(name='',val=[]))
sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d'%(j+1)
sessinfo[i]['regress'][colidx]['val'] = np.zeros((1,numscans))[0].tolist()
sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1
else:
cid = len(sessinfo[i]['cond'])
sessinfo[i]['cond'].insert(cid,dict())
sessinfo[i]['cond'][cid]['name'] = "O%d"%(j+1)
sessinfo[i]['cond'][cid]['onset'] = self._scaletimings([scanno])
sessinfo[i]['cond'][cid]['duration'] = [0]
return sessinfo
def _concatenate_info(self,infolist):
nscans = []
for i,f in enumerate(filename_to_list(self.inputs.functional_runs)):
if isinstance(f,list):
numscans = len(f)
elif isinstance(f,str):
img = load(f)
numscans = img.get_shape()[3]
else:
raise Exception('Functional input not specified correctly')
nscans.insert(i, numscans)
# now combine all fields into 1
# names,onsets,durations,amplitudes,pmod,tmod,regressor_names,regressors
infoout = infolist[0]
for i,info in enumerate(infolist[1:]):
#info.[conditions,tmod] remain the same
if info.onsets:
for j,val in enumerate(info.onsets):
if self.inputs.input_units == 'secs':
infoout.onsets[j].extend((np.array(info.onsets[j])+
self.inputs.time_repetition*sum(nscans[0:(i+1)])).tolist())
else:
infoout.onsets[j].extend((np.array(info.onsets[j])+sum(nscans[0:(i+1)])).tolist())
for j,val in enumerate(info.durations):
if len(val) > 1:
infoout.durations[j].extend(info.durations[j])
if info.pmod:
for j,val in enumerate(info.pmod):
if val:
for key,data in enumerate(val.param):
infoout.pmod[j].param[key].extend(data)
if info.regressors:
#assumes same ordering of regressors across different
#runs and the same names for the regressors
for j,v in enumerate(info.regressors):
infoout.regressors[j].extend(info.regressors[j])
#insert session regressors
if not infoout.regressors:
infoout.regressors = []
onelist = np.zeros((1,sum(nscans)))
onelist[0,sum(nscans[0:(i)]):sum(nscans[0:(i+1)])] = 1
infoout.regressors.insert(len(infoout.regressors),onelist.tolist()[0])
return [infoout],nscans
def _generate_design(self):
infolist = self.inputs.subject_info
if self.inputs.concatenate_runs:
infolist,nscans = self._concatenate_info(infolist)
functional_runs = [filename_to_list(self.inputs.functional_runs)]
else:
functional_runs = filename_to_list(self.inputs.functional_runs)
realignment_parameters = []
if isdefined(self.inputs.realignment_parameters):
rpfiles = filename_to_list(self.inputs.realignment_parameters)
realignment_parameters.insert(0,np.loadtxt(rpfiles[0]))
for rpf in rpfiles[1:]:
mc = np.loadtxt(rpf)
if self.inputs.concatenate_runs:
realignment_parameters[0] = np.concatenate((realignment_parameters[0],mc))
else:
realignment_parameters.insert(len(realignment_parameters),mc)
outliers = []
if isdefined(self.inputs.outlier_files):
outfiles = filename_to_list(self.inputs.outlier_files)
try:
outindices = np.loadtxt(outfiles[0],dtype=int)
if outindices.size == 1:
outliers.insert(0,[outindices.tolist()])
else:
outliers.insert(0,outindices.tolist())
except IOError:
outliers.insert(0,[])
for i,rpf in enumerate(outfiles[1:]):
try:
out = np.loadtxt(rpf,dtype=int)
except IOError:
out = np.array([])
if self.inputs.concatenate_runs:
if out.size>0:
if out.size == 1:
outliers[0].extend([(np.array(out)+sum(nscans[0:(i+1)])).tolist()])
else:
outliers[0].extend((np.array(out)+sum(nscans[0:(i+1)])).tolist())
else:
if out.size == 1:
outliers.insert(len(outliers),[out.tolist()])
else:
outliers.insert(len(outliers),out.tolist())
if self.inputs.is_sparse:
infolist = self._generate_clustered_design(infolist)
sessinfo = self._generate_standard_design(infolist,
functional_runs=functional_runs,
realignment_parameters=realignment_parameters,
outliers=outliers)
np.savez(self._get_outfilename(),session_info=sessinfo)
def _run_interface(self, runtime):
"""
"""
self._generate_design()
runtime.returncode = 0
return runtime
def _get_outfilename(self):
return os.path.join(os.getcwd(),'%s_modelspec.npz'%self.inputs.subject_id)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['session_info'] = self._get_outfilename()
return outputs
|
|
"""Ensembler module for homolog structures"""
__author__ = "Jens Thomas, and Felix Simkovic"
__date__ = "17 Nov 2016"
__version__ = "1.0"
import logging
import os
import shutil
import sys
from ample.ensembler import _ensembler
from ample.ensembler import truncation_util
from ample.ensembler.constants import SIDE_CHAIN_TREATMENTS
from ample.util import ample_util, pdb_edit, sequence_util
logger = logging.getLogger(__name__)
def align_mustang(models, mustang_exe=None, work_dir=None):
if not ample_util.is_exe(mustang_exe):
msg = "Cannot find mustang executable: {0}".format(mustang_exe)
raise RuntimeError(msg)
owd = os.getcwd()
if not work_dir:
work_dir = owd
work_dir = os.path.abspath(work_dir)
if not os.path.isdir(work_dir):
os.mkdir(work_dir)
os.chdir(work_dir)
logfile = os.path.join(work_dir, 'mustang.log')
basename = 'mustang'
cmd = [mustang_exe, '-F', 'fasta', '-o', basename, '-i'] + models
rtn = ample_util.run_command(cmd, logfile=logfile, directory=work_dir)
if not rtn == 0:
msg = "Error running mustang. Check logfile: {0}".format(logfile)
raise RuntimeError(msg)
alignment_file = os.path.join(work_dir, basename + ".afasta")
if not os.path.isfile(alignment_file):
msg = "Could not find alignment file: {0} after running mustang!".format(alignment_file)
raise RuntimeError(msg)
os.chdir(owd) # always need to go back to original directory
return alignment_file
def align_gesamt(models, gesamt_exe=None, work_dir=None):
if not ample_util.is_exe(gesamt_exe):
msg = "Cannot find gesamt executable: {0}".format(gesamt_exe)
raise RuntimeError(msg)
owd = os.getcwd()
if not work_dir:
work_dir = owd
work_dir = os.path.abspath(work_dir)
if not os.path.isdir(work_dir):
os.mkdir(work_dir)
os.chdir(work_dir)
# Need to map chain name to pdb
model2chain = {}
for m in models:
seqd = sequence_util.sequence(m)
if len(seqd) != 1:
msg = "Model {0} does not contain a single chain, got: {1}".format(*seqd.keys())
raise RuntimeError(msg)
model2chain[m] = list(seqd.keys())[0]
basename = 'gesamt'
logfile = os.path.join(work_dir, 'gesamt.log')
alignment_file = os.path.join(work_dir, basename + ".afasta")
# Build up command-line
cmd = [gesamt_exe]
# We iterate through the models to make sure the order stays the same
for m in models:
cmd += [m, '-s', model2chain[m]]
cmd += ['-o', '{0}.pdb'.format(basename), '-a', alignment_file]
rtn = ample_util.run_command(cmd, logfile=logfile, directory=work_dir)
if not rtn == 0:
msg = "Error running gesamt. Check logfile: {0}".format(logfile)
raise RuntimeError(msg)
if not os.path.isfile(alignment_file):
msg = "Gesamt did not generate an alignment file.\nPlease check the logfile: {0}".format(logfile)
raise RuntimeError(msg)
if sys.platform.startswith("win"):
alignment_file = _gesamt_aln_windows_fix(alignment_file)
os.chdir(owd) # always need to go back to original directory
return alignment_file
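# For two single-chain models the command assembled above would look roughly like this
# (illustrative paths and chain IDs):
#
#   gesamt model1.pdb -s A model2.pdb -s B -o gesamt.pdb -a gesamt.afasta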
## BUG: reported to Eugene - 22/03/2016 by hlfsimko
def _gesamt_aln_windows_fix(alnf):
"""fix for MSA to be readable by Theseus"""
shutil.copy(alnf, alnf + '.backup') # create backup file
with open(alnf, "w") as outfh:
for line in open(alnf + '.backup', "r").readlines():
if line.startswith(">"):
line = line[0] + os.path.basename(line[1:])
outfh.write(line)
return alnf
class HomologEnsembler(_ensembler.Ensembler):
"""Ensemble creator using on multiple distant homologous structures
"""
def __init__(self, **kwargs):
# Inherit all functions from Parent Ensembler
super(HomologEnsembler, self).__init__(**kwargs)
self.truncator = None
return
def generate_ensembles(
self,
models,
alignment_file=None,
homolog_aligner=None,
percent_fixed_intervals=None,
percent_truncation=None,
side_chain_treatments=SIDE_CHAIN_TREATMENTS,
truncation_method=None,
**kwargs
):
if not percent_truncation:
percent_truncation = self.percent_truncation
if not truncation_method:
truncation_method = self.truncation_method
if not len(models):
msg = "Cannot find any models for ensembling!"
raise RuntimeError(msg)
if not all([os.path.isfile(m) for m in models]):
msg = "Problem reading models given to Ensembler: {0}".format(models)
raise RuntimeError(msg)
logger.info('Ensembling models in directory: %s', self.work_dir)
# Create final ensembles directory
if not os.path.isdir(self.ensembles_directory):
os.mkdir(self.ensembles_directory)
# standardise all the models
std_models_dir = os.path.join(self.work_dir, "std_models")
os.mkdir(std_models_dir)
std_models = []
for m in models:
std_model = ample_util.filename_append(m, 'std', std_models_dir)
pdb_edit.standardise(pdbin=m, pdbout=std_model, del_hetatm=True)
std_models.append(std_model)
# Get a structural alignment between the different models
if not alignment_file:
if homolog_aligner == 'mustang':
logger.info("Generating alignment file with mustang_exe: %s", self.mustang_exe)
alignment_file = align_mustang(std_models, mustang_exe=self.mustang_exe, work_dir=self.work_dir)
elif homolog_aligner == 'gesamt':
logger.info("Generating alignment file with gesamt_exe: %s", self.gesamt_exe)
alignment_file = align_gesamt(std_models, gesamt_exe=self.gesamt_exe, work_dir=self.work_dir)
else:
msg = "Unknown homolog_aligner: {0}".format(homolog_aligner)
raise RuntimeError(msg)
logger.info("Generated alignment file: %s", alignment_file)
else:
logger.info("Using alignment file: %s", alignment_file)
truncate_dir = os.path.join(self.work_dir, "homolog_truncate")
if not os.path.isdir(truncate_dir):
os.mkdir(truncate_dir)
# Now truncate and create ensembles - as standard ample, but with no subclustering
self.ensembles = []
self.truncator = truncation_util.Truncator(work_dir=truncate_dir)
self.truncator.theseus_exe = self.theseus_exe
for truncation in self.truncator.truncate_models(
models=std_models,
truncation_method=truncation_method,
percent_fixed_intervals=percent_fixed_intervals,
percent_truncation=percent_truncation,
truncation_pruning=None,
homologs=True,
alignment_file=alignment_file,
):
ensemble_dir = os.path.join(truncation.directory, "ensemble_{0}".format(truncation.level))
os.mkdir(ensemble_dir)
os.chdir(ensemble_dir)
# Need to create an alignment file for theseus
basename = "e{0}".format(truncation.level)
superposed_models = self.superpose_models(
truncation.models, basename=basename, work_dir=ensemble_dir, homologs=True
)
if not superposed_models:
logger.critical("Skipping ensemble %s due to error with Theseus", basename)
continue
# Create Ensemble object
pre_ensemble = _ensembler.Ensemble()
pre_ensemble.num_residues = truncation.num_residues
pre_ensemble.truncation_dir = truncation.directory
pre_ensemble.truncation_level = truncation.level
pre_ensemble.truncation_method = truncation.method
pre_ensemble.truncation_percent = truncation.percent
pre_ensemble.truncation_residues = truncation.residues
pre_ensemble.truncation_variance = truncation.variances
pre_ensemble.pdb = superposed_models
for ensemble in self.edit_side_chains(pre_ensemble, side_chain_treatments, homologs=True):
self.ensembles.append(ensemble)
return self.ensembles
def generate_ensembles_from_amoptd(self, models, amoptd):
kwargs = {
'percent_truncation': amoptd['percent'],
'percent_fixed_intervals': amoptd['percent_fixed_intervals'],
'side_chain_treatments': amoptd['side_chain_treatments'],
'truncation_method': amoptd['truncation_method'],
'alignment_file': amoptd['alignment_file'],
'homolog_aligner': amoptd['homolog_aligner'],
}
# strip out any that are None
if sys.version_info.major == 3:
kwargs = {k: v for k, v in kwargs.items() if v is not None}
else:
kwargs = {k: v for k, v in kwargs.iteritems() if v is not None}
return self.generate_ensembles(models, **kwargs)
|
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" test_template.py """
import json
import logging
import os
import time
import urllib
import signal
import subprocess
from collections import namedtuple
from ..common import status
# Test input. Please put each value on its own line, terminated with "\n"; otherwise the
# lines passed into the topology will have incorrect values and the test will fail.
TEST_INPUT = ["1\n", "2\n", "3\n", "4\n", "5\n", "6\n", "7\n", "8\n",
"9\n", "10\n", "11\n", "12\n"]
# Retry variables in case the output is different from the input
RETRY_COUNT = 5
RETRY_INTERVAL = 10
# Topology shard definitions
NON_TMASTER_SHARD = 1
# Topology process name definitions
STMGR = 'stmgr'
HERON_BIN = "bin"
HERON_CORE = "heron-core"
HERON_METRICSMGR = 'metricsmgr'
HERON_SANDBOX_HOME = "."
HERON_STMGR = "heron-stmgr"
HERON_STMGR_CMD = os.path.join(HERON_SANDBOX_HOME, HERON_CORE, HERON_BIN, HERON_STMGR)
ProcessTuple = namedtuple('ProcessTuple', 'pid cmd')
class TestTemplate(object):
""" Class that encapsulates the template used for integration tests. Intended to be abstract and
subclassed for specific tests. """
def __init__(self, testname, params):
self.testname = testname
self.params = params
# pylint: disable=too-many-return-statements, too-many-branches,
# pylint: disable=too-many-statements
def run_test(self):
""" Runs the test template. Must either return TestSuccess or raise TestFailure"""
topology_submitted = False
try:
# prepare test data, start the topology and block until it's running
self._prepare_test_data()
self.submit_topology()
topology_submitted = True
_block_until_stmgr_running(self.get_expected_container_count())
self._block_until_topology_running(self.get_expected_min_instance_count())
# Execute the specific test logic and block until topology is running again
self.execute_test_case()
_block_until_stmgr_running(self.get_expected_container_count())
physical_plan_json =\
self._block_until_topology_running(self.get_expected_min_instance_count())
# trigger the test data to flow and invoke the pre_check_results hook
self._inject_test_data()
self.pre_check_results(physical_plan_json)
# finally verify the expected results
result = self._check_results()
return result
except status.TestFailure as e:
raise e
except Exception as e:
raise status.TestFailure("Exception thrown during test", e)
finally:
if topology_submitted:
self.cleanup_test()
def submit_topology(self):
_submit_topology(
self.params['cliPath'],
self.params['cluster'],
self.params['testJarPath'],
self.params['topologyClassPath'],
self.params['topologyName'],
self.params['readFile'],
self.params['outputFile']
)
# pylint: disable=no-self-use
def get_expected_container_count(self):
return 1
# pylint: disable=no-self-use
def get_expected_min_instance_count(self):
return 1
def execute_test_case(self):
pass
# pylint: disable=no-self-use,unused-argument
def pre_check_results(self, physical_plan_json):
return True
def cleanup_test(self):
try:
_kill_topology(self.params['cliPath'], self.params['cluster'], self.params['topologyName'])
except Exception as e:
logging.error("Failed to kill %s topology: %s", self.params['topologyName'], str(e))
finally:
self._delete_test_data_files()
def _delete_test_data_files(self):
_safe_delete_file(self.params['readFile'])
_safe_delete_file(self.params['outputFile'])
def _prepare_test_data(self):
self._delete_test_data_files()
# insert lines into temp file and then move to read file
try:
with open('temp.txt', 'w') as f:
for line in TEST_INPUT:
f.write(line)
except Exception as e:
logging.error("Failed to write to temp.txt file: %s", str(e))
return False
def _inject_test_data(self):
    # Move the temp file onto the read file. The rename is atomic, so the spout sees the
    # complete contents at once, which makes the test more deterministic.
os.rename('temp.txt', self.params['readFile'])
def _check_results(self):
""" get actual and expected result.
retry if results are not equal a predesignated amount of times
"""
expected_result = ""
actual_result = ""
retries_left = RETRY_COUNT
_sleep("before trying to check results for test %s" % self.testname, RETRY_INTERVAL)
while retries_left > 0:
retries_left -= 1
try:
with open(self.params['readFile'], 'r') as f:
expected_result = f.read()
with open(self.params['outputFile'], 'r') as g:
actual_result = g.read()
      except Exception as e:
        message = ("Failed to read expected or actual results from file for test %s: %s"
                   % (self.testname, str(e)))
        if retries_left == 0:
          raise status.TestFailure(message, e)
        logging.error(message)
# if we get expected result, no need to retry
expected_sorted = sorted(expected_result.split('\n'))
actual_sorted = sorted(actual_result.split('\n'))
if expected_sorted == actual_sorted:
break
if retries_left > 0:
expected_result = ""
actual_result = ""
expected_sorted = []
actual_sorted = []
logging.info("Failed to get expected results for test %s (attempt %s/%s), "\
+ "retrying after %s seconds",
self.testname, RETRY_COUNT - retries_left, RETRY_COUNT, RETRY_INTERVAL)
time.sleep(RETRY_INTERVAL)
# Compare the actual and expected result
if actual_sorted == expected_sorted:
success = status.TestSuccess(
"Actual result matched expected result for test %s" % self.testname)
logging.info("Actual result ---------- \n%s", actual_sorted)
logging.info("Expected result ---------- \n%s", expected_sorted)
return success
else:
failure = status.TestFailure(
"Actual result did not match expected result for test %s" % self.testname)
logging.info("Actual result ---------- \n%s", actual_sorted)
logging.info("Expected result ---------- \n%s", expected_sorted)
raise failure
# pylint: disable=no-self-use
def get_pid(self, process_name, heron_working_directory):
"""
    Open the process's .pid file and read its first and only line, which should be the
    process pid. Return -1 on failure.
"""
process_pid_file = os.path.join(heron_working_directory, process_name + '.pid')
try:
with open(process_pid_file, 'r') as f:
pid = f.readline()
return pid
except Exception:
logging.error("Unable to open file %s", process_pid_file)
return -1
# pylint: disable=no-self-use
def kill_process(self, process_number):
""" kills process by running unix command kill """
if process_number < 1:
raise RuntimeError(
"Not attempting to kill process id < 1 passed to kill_process: %d" % process_number)
logging.info("Killing process number %s", process_number)
try:
os.kill(int(process_number), signal.SIGTERM)
except OSError as ex:
if "No such process" in str(ex): # killing a non-existing process condsidered as success
logging.info(str(ex))
else:
raise RuntimeError("Unable to kill process %s" % process_number)
except Exception:
raise RuntimeError("Unable to kill process %s" % process_number)
logging.info("Killed process number %s", process_number)
def kill_strmgr(self):
logging.info("Executing kill stream manager")
stmgr_pid = self.get_pid('%s-%d' % (STMGR, NON_TMASTER_SHARD), self.params['workingDirectory'])
self.kill_process(stmgr_pid)
def kill_metricsmgr(self):
logging.info("Executing kill metrics manager")
metricsmgr_pid = self.get_pid(
'%s-%d' % (HERON_METRICSMGR, NON_TMASTER_SHARD), self.params['workingDirectory'])
self.kill_process(metricsmgr_pid)
def _get_tracker_pplan(self):
url = 'http://localhost:%s/topologies/physicalplan?' % self.params['trackerPort']\
+ 'cluster=local&environ=default&topology=IntegrationTest_LocalReadWriteTopology'
logging.debug("Fetching packing plan from %s", url)
response = urllib.urlopen(url)
physical_plan_json = json.loads(response.read())
if 'result' not in physical_plan_json:
raise status.TestFailure(
"Could not find result json in physical plan request to tracker: %s" % url)
return physical_plan_json['result']
def _block_until_topology_running(self, min_instances):
retries_left = RETRY_COUNT
_sleep("before trying to fetch pplan for test %s" % self.testname, RETRY_INTERVAL)
while retries_left > 0:
retries_left -= 1
packing_plan = self._get_tracker_pplan()
if packing_plan:
instances_found = len(packing_plan['instances'])
if instances_found >= min_instances:
logging.info("Successfully fetched pplan from tracker for test %s after %s attempts.",
self.testname, RETRY_COUNT - retries_left)
return packing_plan
elif retries_left == 0:
raise status.TestFailure(
"Got packing plan from tracker for test %s but the number of " % self.testname +
"instances found (%d) was less than min expected (%s)." %
(instances_found, min_instances))
if retries_left > 0:
_sleep("before trying again to fetch pplan for test %s (attempt %s/%s)" %
(self.testname, RETRY_COUNT - retries_left, RETRY_COUNT), RETRY_INTERVAL)
else:
raise status.TestFailure("Failed to get pplan from tracker for test %s after %s attempts."
% (self.testname, RETRY_COUNT))
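# A minimal, hypothetical subclass sketch showing how the template above is meant to be
# specialised -- a test that kills the stream manager mid-run and expects the topology
# to recover and still produce the expected output:
#
#   class KillStmgrTest(TestTemplate):
#     def execute_test_case(self):
#       self.kill_strmgr()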
def _block_until_stmgr_running(expected_stmgrs):
# block until ./heron-stmgr exists
process_list = _get_processes()
while not _processes_exists(process_list, HERON_STMGR_CMD, expected_stmgrs):
process_list = _get_processes()
time.sleep(1)
def _submit_topology(heron_cli_path, test_cluster, test_jar_path, topology_class_path,
topology_name, input_file, output_file):
""" Submit topology using heron-cli """
# unicode string messes up subprocess.call quotations, must change into string type
splitcmd = [
heron_cli_path, 'submit', '--verbose', '--', test_cluster, test_jar_path,
topology_class_path, topology_name, input_file, output_file, str(len(TEST_INPUT))
]
logging.info("Submitting topology: %s", splitcmd)
p = subprocess.Popen(splitcmd)
p.wait()
if p.returncode != 0:
raise status.TestFailure("Failed to submit topology %s" % topology_name)
logging.info("Submitted topology %s", topology_name)
def _kill_topology(heron_cli_path, test_cluster, topology_name):
""" Kill a topology using heron-cli """
splitcmd = [heron_cli_path, 'kill', test_cluster, topology_name]
logging.info("Killing topology: %s", ' '.join(splitcmd))
# this call can be blocking, no need for subprocess
if subprocess.call(splitcmd) != 0:
raise RuntimeError("Unable to kill the topology: %s" % topology_name)
def _get_processes():
"""
returns a list of process tuples (pid, cmd)
  This applies only to the local scheduler, since it uses the ps command and assumes
  the topology is running in separate processes on the same machine
"""
# pylint: disable=fixme
# TODO: if the submit fails before we get here (e.g., Topology already exists), this hangs
processes = subprocess.check_output(['ps', '-o', 'pid,args'])
processes = processes.split('\n')
processes = processes[1:] # remove first line, which is name of columns
process_list = []
for process in processes:
# remove empty lines
if process == '':
continue
pretuple = process.split(' ', 1)
process_list.append(ProcessTuple(pretuple[0], pretuple[1]))
return process_list
def _sleep(message, seconds):
logging.info("Sleeping for %d seconds %s", seconds, message)
time.sleep(seconds)
def _processes_exists(process_list, process_cmd, min_processes):
""" check if a process is running """
proccess_count = 0
for process in process_list:
if process_cmd in process.cmd:
proccess_count += 1
return proccess_count >= min_processes
def _safe_delete_file(file_name):
if os.path.isfile(file_name) and os.path.exists(file_name):
try:
os.remove(file_name)
except Exception as e:
logging.error("Failed to delete file: %s: %s", file_name, e)
return False
|
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Samplers for Contexts.
Each sampler class should define __call__(batch_size).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import gin.tf
@gin.configurable
class BaseSampler(object):
"""Base sampler."""
def __init__(self, context_spec, context_range=None, k=2, scope='sampler'):
"""Construct a base sampler.
Args:
context_spec: A context spec.
      context_range: A tuple of (minval, maxval), where minval, maxval are floats
or Numpy arrays with the same shape as the context.
scope: A string denoting scope.
"""
self._context_spec = context_spec
self._context_range = context_range
self._k = k
self._scope = scope
def __call__(self, batch_size, **kwargs):
raise NotImplementedError
def set_replay(self, replay=None):
pass
def _validate_contexts(self, contexts):
"""Validate if contexts have right spec.
Args:
contexts: A [batch_size, num_contexts_dim] tensor.
Raises:
ValueError: If shape or dtype mismatches that of spec.
"""
if contexts[0].shape != self._context_spec.shape:
raise ValueError('contexts has invalid shape %s wrt spec shape %s' %
(contexts[0].shape, self._context_spec.shape))
if contexts.dtype != self._context_spec.dtype:
raise ValueError('contexts has invalid dtype %s wrt spec dtype %s' %
(contexts.dtype, self._context_spec.dtype))
@gin.configurable
class ZeroSampler(BaseSampler):
"""Zero sampler."""
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
contexts = tf.zeros(
dtype=self._context_spec.dtype,
shape=[
batch_size,
] + self._context_spec.shape.as_list())
return contexts, contexts
@gin.configurable
class BinarySampler(BaseSampler):
"""Binary sampler."""
def __init__(self, probs=0.5, *args, **kwargs):
"""Constructor."""
super(BinarySampler, self).__init__(*args, **kwargs)
self._probs = probs
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context."""
spec = self._context_spec
contexts = tf.random_uniform(
shape=[
batch_size,
] + spec.shape.as_list(), dtype=tf.float32)
contexts = tf.cast(tf.greater(contexts, self._probs), dtype=spec.dtype)
return contexts, contexts
@gin.configurable
class RandomSampler(BaseSampler):
"""Random sampler."""
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
spec = self._context_spec
context_range = self._context_range
if isinstance(context_range[0], (int, float)):
contexts = tf.random_uniform(
shape=[
batch_size,
] + spec.shape.as_list(),
minval=context_range[0],
maxval=context_range[1],
dtype=spec.dtype)
elif isinstance(context_range[0], (list, tuple, np.ndarray)):
assert len(spec.shape.as_list()) == 1
assert spec.shape.as_list()[0] == len(context_range[0])
assert spec.shape.as_list()[0] == len(context_range[1])
contexts = tf.concat(
[
tf.random_uniform(
shape=[
batch_size, 1,
] + spec.shape.as_list()[1:],
minval=context_range[0][i],
maxval=context_range[1][i],
dtype=spec.dtype) for i in range(spec.shape.as_list()[0])
],
axis=1)
else: raise NotImplementedError(context_range)
self._validate_contexts(contexts)
state, next_state = kwargs['state'], kwargs['next_state']
if state is not None and next_state is not None:
pass
#contexts = tf.concat(
# [tf.random_normal(tf.shape(state[:, :self._k]), dtype=tf.float64) +
# tf.random_shuffle(state[:, :self._k]),
# contexts[:, self._k:]], 1)
return contexts, contexts
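# Hypothetical usage sketch (assumes a TF1 graph context and a TensorSpec-like object
# exposing .shape and .dtype; not taken from the original module):
#
#   spec = tf.TensorSpec(shape=[3], dtype=tf.float32)
#   sampler = RandomSampler(context_spec=spec, context_range=(-1.0, 1.0))
#   contexts, next_contexts = sampler(batch_size=32, state=None, next_state=None)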
@gin.configurable
class ScheduledSampler(BaseSampler):
"""Scheduled sampler."""
def __init__(self,
scope='default',
values=None,
scheduler='cycle',
scheduler_params=None,
*args, **kwargs):
"""Construct sampler.
Args:
scope: Scope name.
values: A list of numbers or [num_context_dim] Numpy arrays
representing the values to cycle.
scheduler: scheduler type.
scheduler_params: scheduler parameters.
*args: arguments.
**kwargs: keyword arguments.
"""
super(ScheduledSampler, self).__init__(*args, **kwargs)
self._scope = scope
self._values = values
self._scheduler = scheduler
self._scheduler_params = scheduler_params or {}
assert self._values is not None and len(
self._values), 'must provide non-empty values.'
self._n = len(self._values)
# TODO(shanegu): move variable creation outside. resolve tf.cond problem.
self._count = 0
self._i = tf.Variable(
tf.zeros(shape=(), dtype=tf.int32),
name='%s-scheduled_sampler_%d' % (self._scope, self._count))
self._values = tf.constant(self._values, dtype=self._context_spec.dtype)
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
spec = self._context_spec
next_op = self._next(self._i)
with tf.control_dependencies([next_op]):
value = self._values[self._i]
if value.get_shape().as_list():
values = tf.tile(
tf.expand_dims(value, 0), (batch_size,) + (1,) * spec.shape.ndims)
else:
values = value + tf.zeros(
shape=[
batch_size,
] + spec.shape.as_list(), dtype=spec.dtype)
self._validate_contexts(values)
self._count += 1
return values, values
def _next(self, i):
"""Return op that increments pointer to next value.
Args:
i: A tensorflow integer variable.
Returns:
Op that increments pointer.
"""
if self._scheduler == 'cycle':
inc = ('inc' in self._scheduler_params and
self._scheduler_params['inc']) or 1
return tf.assign(i, tf.mod(i+inc, self._n))
else:
raise NotImplementedError(self._scheduler)
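# Since the class is gin-configurable, its constructor arguments are typically set in a
# gin file; a hypothetical binding might look like:
#
#   ScheduledSampler.values = [[4.0, 0.0], [-4.0, 0.0]]
#   ScheduledSampler.scheduler = 'cycle'
#   ScheduledSampler.scheduler_params = {'inc': 1}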
@gin.configurable
class ReplaySampler(BaseSampler):
"""Replay sampler."""
def __init__(self,
prefetch_queue_capacity=2,
override_indices=None,
state_indices=None,
*args,
**kwargs):
"""Construct sampler.
Args:
prefetch_queue_capacity: Capacity for prefetch queue.
override_indices: Override indices.
state_indices: Select certain indices from state dimension.
*args: arguments.
**kwargs: keyword arguments.
"""
super(ReplaySampler, self).__init__(*args, **kwargs)
self._prefetch_queue_capacity = prefetch_queue_capacity
self._override_indices = override_indices
self._state_indices = state_indices
def set_replay(self, replay):
"""Set replay.
Args:
replay: A replay buffer.
"""
self._replay = replay
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
batch = self._replay.GetRandomBatch(batch_size)
next_states = batch[4]
if self._prefetch_queue_capacity > 0:
batch_queue = slim.prefetch_queue.prefetch_queue(
[next_states],
capacity=self._prefetch_queue_capacity,
name='%s/batch_context_queue' % self._scope)
next_states = batch_queue.dequeue()
if self._override_indices is not None:
assert self._context_range is not None and isinstance(
self._context_range[0], (int, long, float))
next_states = tf.concat(
[
tf.random_uniform(
shape=next_states[:, :1].shape,
minval=self._context_range[0],
maxval=self._context_range[1],
dtype=next_states.dtype)
if i in self._override_indices else next_states[:, i:i + 1]
for i in range(self._context_spec.shape.as_list()[0])
],
axis=1)
if self._state_indices is not None:
next_states = tf.concat(
[
next_states[:, i:i + 1]
              for i in self._state_indices
],
axis=1)
self._validate_contexts(next_states)
return next_states, next_states
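# Hedged illustration (not part of the original file): a NumPy sketch of the
# column-wise assembly in ReplaySampler.__call__, where dimensions listed in
# `override_indices` are replaced by uniform draws from `context_range` and all
# other dimensions are copied from the replayed next states. Hypothetical helper
# name; relies on this module's existing `numpy as np` import.
def _demo_override_columns(next_states, override_indices, context_range):
  """next_states: [batch_size, num_dims] array; returns an array of equal shape."""
  low, high = context_range
  batch_size, num_dims = next_states.shape
  columns = []
  for i in range(num_dims):
    if i in override_indices:
      columns.append(np.random.uniform(low, high, size=(batch_size, 1)))
    else:
      columns.append(next_states[:, i:i + 1])
  return np.concatenate(columns, axis=1)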
@gin.configurable
class TimeSampler(BaseSampler):
"""Time Sampler."""
def __init__(self, minval=0, maxval=1, timestep=-1, *args, **kwargs):
"""Construct sampler.
Args:
minval: Min value integer.
maxval: Max value integer.
timestep: Time step between states and next_states.
*args: arguments.
**kwargs: keyword arguments.
"""
super(TimeSampler, self).__init__(*args, **kwargs)
assert self._context_spec.shape.as_list() == [1]
self._minval = minval
self._maxval = maxval
self._timestep = timestep
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
if self._maxval == self._minval:
contexts = tf.constant(
self._maxval, shape=[batch_size, 1], dtype=tf.int32)
else:
contexts = tf.random_uniform(
shape=[batch_size, 1],
dtype=tf.int32,
maxval=self._maxval,
minval=self._minval)
next_contexts = tf.maximum(contexts + self._timestep, 0)
return tf.cast(
contexts, dtype=self._context_spec.dtype), tf.cast(
next_contexts, dtype=self._context_spec.dtype)
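# Hedged illustration (not part of the original file): TimeSampler draws integer
# "time remaining" contexts in [minval, maxval), and the next context is the
# same value advanced by `timestep` and clamped at zero. Hypothetical helper
# name; relies on this module's existing `numpy as np` import.
def _demo_time_context_pair(contexts, timestep=-1):
  """contexts: [batch_size, 1] integer array; returns (contexts, next_contexts)."""
  next_contexts = np.maximum(contexts + timestep, 0)
  return contexts, next_contexts
# e.g. contexts [[3], [1], [0]] with timestep=-1 give next contexts [[2], [0], [0]].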
@gin.configurable
class ConstantSampler(BaseSampler):
"""Constant sampler."""
def __init__(self, value=None, *args, **kwargs):
"""Construct sampler.
Args:
value: A list or Numpy array for values of the constant.
*args: arguments.
**kwargs: keyword arguments.
"""
super(ConstantSampler, self).__init__(*args, **kwargs)
self._value = value
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
spec = self._context_spec
value_ = tf.constant(self._value, shape=spec.shape, dtype=spec.dtype)
values = tf.tile(
tf.expand_dims(value_, 0), (batch_size,) + (1,) * spec.shape.ndims)
self._validate_contexts(values)
return values, values
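# Hedged illustration (not part of the original file): ConstantSampler simply
# broadcasts one fixed context vector across the batch. The NumPy mirror below
# (hypothetical helper name, relying on this module's `numpy as np` import)
# matches the tf.tile(tf.expand_dims(value, 0), ...) call above.
def _demo_constant_batch(value, batch_size):
  """value: [num_context_dims] array-like; returns a [batch_size, dims] array."""
  value = np.asarray(value)
  return np.tile(value[None, :], (batch_size,) + (1,) * value.ndim)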
@gin.configurable
class DirectionSampler(RandomSampler):
"""Direction sampler."""
def __call__(self, batch_size, **kwargs):
"""Sample a batch of context.
Args:
batch_size: Batch size.
Returns:
Two [batch_size, num_context_dims] tensors.
"""
spec = self._context_spec
context_range = self._context_range
if isinstance(context_range[0], (int, float)):
contexts = tf.random_uniform(
shape=[
batch_size,
] + spec.shape.as_list(),
minval=context_range[0],
maxval=context_range[1],
dtype=spec.dtype)
elif isinstance(context_range[0], (list, tuple, np.ndarray)):
assert len(spec.shape.as_list()) == 1
assert spec.shape.as_list()[0] == len(context_range[0])
assert spec.shape.as_list()[0] == len(context_range[1])
contexts = tf.concat(
[
tf.random_uniform(
shape=[
batch_size, 1,
] + spec.shape.as_list()[1:],
minval=context_range[0][i],
maxval=context_range[1][i],
dtype=spec.dtype) for i in range(spec.shape.as_list()[0])
],
axis=1)
    else:
      raise NotImplementedError(context_range)
self._validate_contexts(contexts)
if 'sampler_fn' in kwargs:
other_contexts = kwargs['sampler_fn']()
else:
other_contexts = contexts
state, next_state = kwargs['state'], kwargs['next_state']
if state is not None and next_state is not None:
my_context_range = (np.array(context_range[1]) - np.array(context_range[0])) / 2 * np.ones(spec.shape.as_list())
contexts = tf.concat(
[0.1 * my_context_range[:self._k] *
tf.random_normal(tf.shape(state[:, :self._k]), dtype=state.dtype) +
tf.random_shuffle(state[:, :self._k]) - state[:, :self._k],
other_contexts[:, self._k:]], 1)
      # Debugging aid (left disabled): print context statistics.
      # contexts = tf.Print(contexts,
      #                     [contexts, tf.reduce_max(contexts, 0),
      #                      tf.reduce_min(state, 0), tf.reduce_max(state, 0)],
      #                     'contexts', summarize=15)
      # Re-label the goal for the transition: the absolute target state + context
      # is re-expressed relative to next_state.
      next_contexts = tf.concat(
          [state[:, :self._k] + contexts[:, :self._k] - next_state[:, :self._k],
           other_contexts[:, self._k:]], 1)
      # The re-labeled value is immediately overridden below; next_contexts is
      # kept equal to contexts (original note: "cosine").
      next_contexts = contexts
else:
next_contexts = contexts
return tf.stop_gradient(contexts), tf.stop_gradient(next_contexts)
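# Hedged illustration (not part of the original file): the goal re-labeling that
# DirectionSampler computes when `state` and `next_state` are given. A relative
# (directional) context c sampled at state s points at the absolute target
# s + c; after moving to s', that same target is expressed as s + c - s'.
# Hypothetical helper name; note the code above currently keeps next_contexts
# equal to contexts instead of using this re-labeled value.
def _demo_relabel_relative_goal(state, context, next_state):
  """All arguments are [batch_size, k] arrays over the first k goal dimensions."""
  return state + context - next_state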
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import unittest
from datetime import datetime
from typing import Any
from unittest import mock
from google.api_core.exceptions import AlreadyExists
from google.api_core.retry import Retry
from airflow.providers.google.cloud.operators.dataproc import (
ClusterGenerator, DataprocCreateClusterOperator, DataprocDeleteClusterOperator,
DataprocInstantiateInlineWorkflowTemplateOperator, DataprocInstantiateWorkflowTemplateOperator,
DataprocScaleClusterOperator, DataprocSubmitHadoopJobOperator, DataprocSubmitHiveJobOperator,
DataprocSubmitJobOperator, DataprocSubmitPigJobOperator, DataprocSubmitPySparkJobOperator,
DataprocSubmitSparkJobOperator, DataprocSubmitSparkSqlJobOperator, DataprocUpdateClusterOperator,
)
from airflow.version import version as airflow_version
cluster_params = inspect.signature(ClusterGenerator.__init__).parameters
AIRFLOW_VERSION = "v" + airflow_version.replace(".", "-").replace("+", "-")
DATAPROC_PATH = "airflow.providers.google.cloud.operators.dataproc.{}"
TASK_ID = "task-id"
GCP_PROJECT = "test-project"
GCP_LOCATION = "test-location"
GCP_CONN_ID = "test-conn"
CLUSTER_NAME = "cluster_name"
CLUSTER = {
"project_id": "project_id",
"cluster_name": CLUSTER_NAME,
"config": {
"gce_cluster_config": {
"zone_uri": "https://www.googleapis.com/compute/v1/projects/"
"project_id/zones/zone",
"metadata": {"metadata": "data"},
"network_uri": "network_uri",
"subnetwork_uri": "subnetwork_uri",
"internal_ip_only": True,
"tags": ["tags"],
"service_account": "service_account",
"service_account_scopes": ["service_account_scopes"],
},
"master_config": {
"num_instances": 2,
"machine_type_uri": "https://www.googleapis.com/compute/v1/projects/"
"project_id/zones/zone/machineTypes/master_machine_type",
"disk_config": {
"boot_disk_type": "master_disk_type",
"boot_disk_size_gb": 128,
},
"image_uri": "https://www.googleapis.com/compute/beta/projects/"
"custom_image_project_id/global/images/custom_image",
},
"worker_config": {
"num_instances": 2,
"machine_type_uri": "https://www.googleapis.com/compute/v1/projects/"
"project_id/zones/zone/machineTypes/worker_machine_type",
"disk_config": {
"boot_disk_type": "worker_disk_type",
"boot_disk_size_gb": 256,
},
"image_uri": "https://www.googleapis.com/compute/beta/projects/"
"custom_image_project_id/global/images/custom_image",
},
"secondary_worker_config": {
"num_instances": 4,
"machine_type_uri": "https://www.googleapis.com/compute/v1/projects/"
"project_id/zones/zone/machineTypes/worker_machine_type",
"disk_config": {
"boot_disk_type": "worker_disk_type",
"boot_disk_size_gb": 256,
},
"is_preemptible": True,
},
"software_config": {
"properties": {"properties": "data"},
"optional_components": ["optional_components"],
},
"lifecycle_config": {
"idle_delete_ttl": "60s",
"auto_delete_time": "2019-09-12T00:00:00.000000Z",
},
"encryption_config": {"gce_pd_kms_key_name": "customer_managed_key"},
"autoscaling_config": {"policy_uri": "autoscaling_policy"},
"config_bucket": "storage_bucket",
"initialization_actions": [
{"executable_file": "init_actions_uris", "execution_timeout": "600s"}
],
},
"labels": {"labels": "data", "airflow-version": AIRFLOW_VERSION},
}
UPDATE_MASK = {
"paths": [
"config.worker_config.num_instances",
"config.secondary_worker_config.num_instances",
]
}
TIMEOUT = 120
RETRY = mock.MagicMock(Retry)
METADATA = [("key", "value")]
REQUEST_ID = "request_id_uuid"
def assert_warning(msg: str, warning: Any):
assert any(msg in str(w) for w in warning.warnings)
class TestClusterGenerator(unittest.TestCase):
def test_image_version(self):
with self.assertRaises(ValueError) as err:
ClusterGenerator(custom_image="custom_image", image_version="image_version")
self.assertIn("custom_image and image_version", str(err))
def test_nodes_number(self):
with self.assertRaises(AssertionError) as err:
ClusterGenerator(num_workers=0, num_preemptible_workers=0)
self.assertIn("num_workers == 0 means single", str(err))
def test_build(self):
generator = ClusterGenerator(
project_id="project_id",
cluster_name="cluster_name",
num_workers=2,
zone="zone",
network_uri="network_uri",
subnetwork_uri="subnetwork_uri",
internal_ip_only=True,
tags=["tags"],
storage_bucket="storage_bucket",
init_actions_uris=["init_actions_uris"],
init_action_timeout="10m",
metadata={"metadata": "data"},
custom_image="custom_image",
custom_image_project_id="custom_image_project_id",
autoscaling_policy="autoscaling_policy",
properties={"properties": "data"},
optional_components=["optional_components"],
num_masters=2,
master_machine_type="master_machine_type",
master_disk_type="master_disk_type",
master_disk_size=128,
worker_machine_type="worker_machine_type",
worker_disk_type="worker_disk_type",
worker_disk_size=256,
num_preemptible_workers=4,
labels={"labels": "data"},
region="region",
service_account="service_account",
service_account_scopes=["service_account_scopes"],
idle_delete_ttl=60,
auto_delete_time=datetime(2019, 9, 12),
auto_delete_ttl=250,
customer_managed_key="customer_managed_key",
)
cluster = generator.make()
self.assertDictEqual(CLUSTER, cluster)
class TestDataprocClusterCreateOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("inspect.signature"))
@mock.patch(DATAPROC_PATH.format("ClusterGenerator"))
    def test_deprecation_warning(self, mock_generator, mock_signature):
mock_signature.return_value.parameters = cluster_params
with self.assertWarns(DeprecationWarning) as warning:
DataprocCreateClusterOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name="cluster_name",
num_workers=2,
zone="zone",
)
assert_warning("Passing cluster parameters by keywords", warning)
mock_generator.assert_called_once_with(
task_id=TASK_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name="cluster_name",
num_workers=2,
zone="zone",
)
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
op = DataprocCreateClusterOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=CLUSTER,
request_id=REQUEST_ID,
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_cluster.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=CLUSTER,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute_if_cluster_exists(self, mock_hook):
mock_hook.return_value.create_cluster.side_effect = [AlreadyExists("test")]
op = DataprocCreateClusterOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=CLUSTER,
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
request_id=REQUEST_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_cluster.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster=CLUSTER,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.get_cluster.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name=CLUSTER_NAME,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestDataprocClusterScaleOperator(unittest.TestCase):
    def test_deprecation_warning(self):
with self.assertWarns(DeprecationWarning) as warning:
DataprocScaleClusterOperator(
task_id=TASK_ID, cluster_name=CLUSTER_NAME, project_id=GCP_PROJECT
)
assert_warning("DataprocUpdateClusterOperator", warning)
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
cluster_update = {
"config": {
"worker_config": {"num_instances": 3},
"secondary_worker_config": {"num_instances": 4},
}
}
op = DataprocScaleClusterOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=GCP_PROJECT,
region=GCP_LOCATION,
num_workers=3,
num_preemptible_workers=4,
graceful_decommission_timeout="10m",
gcp_conn_id=GCP_CONN_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster_name=CLUSTER_NAME,
cluster=cluster_update,
graceful_decommission_timeout={"seconds": 600},
update_mask=UPDATE_MASK,
)
class TestDataprocClusterDeleteOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
op = DataprocDeleteClusterOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name=CLUSTER_NAME,
request_id=REQUEST_ID,
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_cluster.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name=CLUSTER_NAME,
cluster_uuid=None,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestDataprocSubmitJobOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
job = {}
job_id = "job_id"
mock_hook.return_value.wait_for_job.return_value = None
mock_hook.return_value.submit_job.return_value.reference.job_id = job_id
op = DataprocSubmitJobOperator(
task_id=TASK_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
job=job,
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
request_id=REQUEST_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.submit_job.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
job=job,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.wait_for_job.assert_called_once_with(
job_id=job_id, project_id=GCP_PROJECT, location=GCP_LOCATION
)
class TestDataprocUpdateClusterOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
op = DataprocUpdateClusterOperator(
task_id=TASK_ID,
location=GCP_LOCATION,
cluster_name=CLUSTER_NAME,
cluster=CLUSTER,
update_mask=UPDATE_MASK,
request_id=REQUEST_ID,
graceful_decommission_timeout={"graceful_decommission_timeout": "600s"},
project_id=GCP_PROJECT,
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_name=CLUSTER_NAME,
cluster=CLUSTER,
update_mask=UPDATE_MASK,
graceful_decommission_timeout={"graceful_decommission_timeout": "600s"},
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestDataprocWorkflowTemplateInstantiateOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
template_id = "template_id"
version = 6
parameters = {}
op = DataprocInstantiateWorkflowTemplateOperator(
task_id=TASK_ID,
template_id=template_id,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
version=version,
parameters=parameters,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.instantiate_workflow_template.assert_called_once_with(
template_name=template_id,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
version=version,
parameters=parameters,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestDataprocWorkflowTemplateInstantiateInlineOperator(unittest.TestCase):
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
template = {}
op = DataprocInstantiateInlineWorkflowTemplateOperator(
task_id=TASK_ID,
template=template,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.instantiate_inline_workflow_template.assert_called_once_with(
template=template,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
class TestDataProcHiveOperator(unittest.TestCase):
query = "define sin HiveUDF('sin');"
variables = {"key": "value"}
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"hive_job": {"query_list": {"queries": [query]}, "script_variables": variables},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitHiveJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
query="query",
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_uuid.return_value = self.job_id
mock_hook.return_value.project_id = GCP_PROJECT
mock_hook.return_value.wait_for_job.return_value = None
mock_hook.return_value.submit_job.return_value.reference.job_id = self.job_id
op = DataprocSubmitHiveJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.submit_job.assert_called_once_with(
project_id=GCP_PROJECT, job=self.job, location=GCP_LOCATION
)
mock_hook.return_value.wait_for_job.assert_called_once_with(
job_id=self.job_id, location=GCP_LOCATION, project_id=GCP_PROJECT
)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_builder(self, mock_hook, mock_uuid):
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitHiveJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
class TestDataProcPigOperator(unittest.TestCase):
query = "define sin HiveUDF('sin');"
variables = {"key": "value"}
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"pig_job": {"query_list": {"queries": [query]}, "script_variables": variables},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitPigJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
query="query",
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_uuid.return_value = self.job_id
mock_hook.return_value.project_id = GCP_PROJECT
mock_hook.return_value.wait_for_job.return_value = None
mock_hook.return_value.submit_job.return_value.reference.job_id = self.job_id
op = DataprocSubmitPigJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.submit_job.assert_called_once_with(
project_id=GCP_PROJECT, job=self.job, location=GCP_LOCATION
)
mock_hook.return_value.wait_for_job.assert_called_once_with(
job_id=self.job_id, location=GCP_LOCATION, project_id=GCP_PROJECT
)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_builder(self, mock_hook, mock_uuid):
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitPigJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
class TestDataProcSparkSqlOperator(unittest.TestCase):
query = "SHOW DATABASES;"
variables = {"key": "value"}
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"spark_sql_job": {
"query_list": {"queries": [query]},
"script_variables": variables,
},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitSparkSqlJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
query="query",
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_uuid.return_value = self.job_id
mock_hook.return_value.project_id = GCP_PROJECT
mock_hook.return_value.wait_for_job.return_value = None
mock_hook.return_value.submit_job.return_value.reference.job_id = self.job_id
op = DataprocSubmitSparkSqlJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.submit_job.assert_called_once_with(
project_id=GCP_PROJECT, job=self.job, location=GCP_LOCATION
)
mock_hook.return_value.wait_for_job.assert_called_once_with(
job_id=self.job_id, location=GCP_LOCATION, project_id=GCP_PROJECT
)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_builder(self, mock_hook, mock_uuid):
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitSparkSqlJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
query=self.query,
variables=self.variables,
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
class TestDataProcSparkOperator(unittest.TestCase):
main_class = "org.apache.spark.examples.SparkPi"
jars = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"]
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"spark_job": {"jar_file_uris": jars, "main_class": main_class},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitSparkJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
main_class=self.main_class,
dataproc_jars=self.jars,
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_uuid.return_value = self.job_id
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitSparkJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
main_class=self.main_class,
dataproc_jars=self.jars,
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
class TestDataProcHadoopOperator(unittest.TestCase):
args = ["wordcount", "gs://pub/shakespeare/rose.txt"]
jar = "file:///usr/lib/spark/examples/jars/spark-examples.jar"
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"hadoop_job": {"main_jar_file_uri": jar, "args": args},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitHadoopJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
main_jar=self.jar,
arguments=self.args,
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_uuid.return_value = self.job_id
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitHadoopJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
gcp_conn_id=GCP_CONN_ID,
main_jar=self.jar,
arguments=self.args,
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
class TestDataProcPySparkOperator(unittest.TestCase):
uri = "gs://{}/{}"
job_id = "uuid_id"
job = {
"reference": {
"project_id": GCP_PROJECT,
"job_id": "{{task.task_id}}_{{ds_nodash}}_" + job_id,
},
"placement": {"cluster_name": "cluster-1"},
"labels": {"airflow-version": AIRFLOW_VERSION},
"pyspark_job": {"main_python_file_uri": uri},
}
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_deprecation_warning(self, mock_hook):
with self.assertWarns(DeprecationWarning) as warning:
DataprocSubmitPySparkJobOperator(
task_id=TASK_ID,
region=GCP_LOCATION,
main=self.uri,
)
assert_warning("DataprocSubmitJobOperator", warning)
@mock.patch(DATAPROC_PATH.format("uuid.uuid4"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_uuid):
mock_hook.return_value.project_id = GCP_PROJECT
mock_uuid.return_value = self.job_id
op = DataprocSubmitPySparkJobOperator(
task_id=TASK_ID, region=GCP_LOCATION, gcp_conn_id=GCP_CONN_ID, main=self.uri
)
job = op.generate_job()
self.assertDictEqual(self.job, job)
|
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import gzip
import logging
import optparse
import os
import re
import select
import shutil
import sys
import threading
import time
import webbrowser
import zipfile
import zlib
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
_TRACE_VIEWER_TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<title>%(title)s</title>
<style>
%(timeline_css)s
</style>
<style>
.view {
overflow: hidden;
position: absolute;
top: 0;
bottom: 0;
left: 0;
right: 0;
}
</style>
<script>
%(timeline_js)s
</script>
<script>
document.addEventListener('DOMContentLoaded', function() {
var trace_data = window.atob('%(trace_data_base64)s');
var m = new tracing.TraceModel(trace_data);
var timelineViewEl = document.querySelector('.view');
ui.decorate(timelineViewEl, tracing.TimelineView);
timelineViewEl.model = m;
timelineViewEl.tabIndex = 1;
timelineViewEl.timeline.focusElement = timelineViewEl;
});
</script>
</head>
<body>
<div class="view"></view>
</body>
</html>"""
_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
def _GetTraceTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
def _PackageTraceAsHtml(trace_file_name, html_file_name):
trace_viewer_root = os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'trace-viewer')
build_dir = os.path.join(trace_viewer_root, 'build')
src_dir = os.path.join(trace_viewer_root, 'src')
  if build_dir not in sys.path:
sys.path.append(build_dir)
generate = __import__('generate', {}, {})
parse_deps = __import__('parse_deps', {}, {})
basename = os.path.splitext(trace_file_name)[0]
load_sequence = parse_deps.calc_load_sequence(
['tracing/standalone_timeline_view.js'], [src_dir])
with open(trace_file_name) as trace_file:
trace_data = base64.b64encode(trace_file.read())
with open(html_file_name, 'w') as html_file:
html = _TRACE_VIEWER_TEMPLATE % {
'title': os.path.basename(os.path.splitext(trace_file_name)[0]),
'timeline_js': generate.generate_js(load_sequence),
'timeline_css': generate.generate_css(load_sequence),
'trace_data_base64': trace_data
}
html_file.write(html)
class ChromeTracingController(object):
def __init__(self, adb, package_info, categories, ring_buffer):
self._adb = adb
self._package_info = package_info
self._categories = categories
self._ring_buffer = ring_buffer
self._trace_file = None
self._trace_interval = None
self._trace_start_re = \
re.compile(r'Logging performance trace to file: (.*)')
self._trace_finish_re = \
re.compile(r'Profiler finished[.] Results are in (.*)[.]')
self._adb.StartMonitoringLogcat(clear=False)
def __str__(self):
return 'chrome trace'
def StartTracing(self, interval):
self._trace_interval = interval
self._adb.SyncLogCat()
self._adb.BroadcastIntent(self._package_info.package, 'GPU_PROFILER_START',
'-e categories "%s"' % ','.join(self._categories),
'-e continuous' if self._ring_buffer else '')
# Chrome logs two different messages related to tracing:
#
# 1. "Logging performance trace to file [...]"
# 2. "Profiler finished. Results are in [...]"
#
# The first one is printed when tracing starts and the second one indicates
# that the trace file is ready to be pulled.
try:
self._trace_file = self._adb.WaitForLogMatch(self._trace_start_re,
None,
timeout=5).group(1)
except pexpect.TIMEOUT:
raise RuntimeError('Trace start marker not found. Is the correct version '
'of the browser running?')
def StopTracing(self):
if not self._trace_file:
return
self._adb.BroadcastIntent(self._package_info.package, 'GPU_PROFILER_STOP')
self._adb.WaitForLogMatch(self._trace_finish_re, None, timeout=120)
def PullTrace(self):
# Wait a bit for the browser to finish writing the trace file.
time.sleep(self._trace_interval / 4 + 1)
trace_file = self._trace_file.replace('/storage/emulated/0/', '/sdcard/')
host_file = os.path.join(os.path.curdir, os.path.basename(trace_file))
self._adb.PullFileFromDevice(trace_file, host_file)
return host_file
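# Hedged illustration (not part of the original file): the two logcat markers
# documented in ChromeTracingController.StartTracing can be recognized with the
# same regular expressions used by that class. Hypothetical helper name; the
# sample line format is an assumption.
def _demo_extract_trace_path(log_line):
  """Returns the trace file path from a start/finish marker line, else None."""
  start_re = re.compile(r'Logging performance trace to file: (.*)')
  finish_re = re.compile(r'Profiler finished[.] Results are in (.*)[.]')
  match = start_re.search(log_line) or finish_re.search(log_line)
  return match.group(1) if match else None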
_SYSTRACE_OPTIONS = [
# Compress the trace before sending it over USB.
'-z',
# Use a large trace buffer to increase the polling interval.
'-b', '16384'
]
# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
class SystraceController(object):
def __init__(self, adb, categories, ring_buffer):
self._adb = adb
self._categories = categories
self._ring_buffer = ring_buffer
self._done = threading.Event()
self._thread = None
self._trace_data = None
def __str__(self):
return 'systrace'
@staticmethod
def GetCategories(adb):
return adb.RunShellCommand('atrace --list_categories')
def StartTracing(self, interval):
self._thread = threading.Thread(target=self._CollectData)
self._thread.start()
def StopTracing(self):
self._done.set()
def PullTrace(self):
self._thread.join()
self._thread = None
if self._trace_data:
output_name = 'systrace-%s' % _GetTraceTimestamp()
with open(output_name, 'w') as out:
out.write(self._trace_data)
return output_name
def _RunATraceCommand(self, command):
# We use a separate interface to adb because the one from AndroidCommands
# isn't re-entrant.
device = ['-s', self._adb.GetDevice()] if self._adb.GetDevice() else []
cmd = ['adb'] + device + ['shell', 'atrace', '--%s' % command] + \
_SYSTRACE_OPTIONS + self._categories
return cmd_helper.GetCmdOutput(cmd)
def _CollectData(self):
trace_data = []
self._RunATraceCommand('async_start')
try:
while not self._done.is_set():
self._done.wait(_SYSTRACE_INTERVAL)
if not self._ring_buffer or self._done.is_set():
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_dump')))
finally:
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_stop')))
self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
@staticmethod
def _DecodeTraceData(trace_data):
try:
trace_start = trace_data.index('TRACE:')
except ValueError:
raise RuntimeError('Systrace start marker not found')
trace_data = trace_data[trace_start + 6:]
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
# Skip the initial newline.
return trace_data[1:]
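# Hedged illustration (not part of the original file): how
# SystraceController._DecodeTraceData trims an atrace dump down to the payload
# after the 'TRACE:' marker. The sample string is hypothetical plain text; in
# practice the payload after the marker is zlib-compressed (hence the
# zlib.decompress in _CollectData).
def _demo_trim_atrace_header():
  raw = 'capturing trace... done\r\nTRACE:\r\n# tracer: nop\r\nline1\r\n'
  trimmed = raw[raw.index('TRACE:') + 6:]
  if trimmed.startswith('\r\n'):
    trimmed = trimmed.replace('\r\n', '\n')
  return trimmed[1:]  # -> '# tracer: nop\nline1\n'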
def _GetSupportedBrowsers():
# Add aliases for backwards compatibility.
supported_browsers = {
'stable': constants.PACKAGE_INFO['chrome_stable'],
'beta': constants.PACKAGE_INFO['chrome_beta'],
'dev': constants.PACKAGE_INFO['chrome_dev'],
'build': constants.PACKAGE_INFO['chrome'],
}
supported_browsers.update(constants.PACKAGE_INFO)
unsupported_browsers = ['content_browsertests', 'gtest', 'legacy_browser']
for browser in unsupported_browsers:
del supported_browsers[browser]
return supported_browsers
def _CompressFile(host_file, output):
with gzip.open(output, 'wb') as out:
with open(host_file, 'rb') as input_file:
out.write(input_file.read())
os.unlink(host_file)
def _ArchiveFiles(host_files, output):
with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as z:
for host_file in host_files:
z.write(host_file)
os.unlink(host_file)
def _PrintMessage(heading, eol='\n'):
sys.stdout.write('%s%s' % (heading, eol))
sys.stdout.flush()
def _WaitForEnter(timeout):
select.select([sys.stdin], [], [], timeout)
def _StartTracing(controllers, interval):
for controller in controllers:
controller.StartTracing(interval)
def _StopTracing(controllers):
for controller in controllers:
controller.StopTracing()
def _PullTraces(controllers, output, compress, write_html):
_PrintMessage('Downloading...', eol='')
trace_files = []
for controller in controllers:
trace_files.append(controller.PullTrace())
if compress and len(trace_files) == 1:
result = output or trace_files[0] + '.gz'
_CompressFile(trace_files[0], result)
elif len(trace_files) > 1:
result = output or 'chrome-combined-trace-%s.zip' % _GetTraceTimestamp()
_ArchiveFiles(trace_files, result)
elif output:
result = output
shutil.move(trace_files[0], result)
else:
result = trace_files[0]
if write_html:
result, trace_file = os.path.splitext(result)[0] + '.html', result
_PackageTraceAsHtml(trace_file, result)
if trace_file != result:
os.unlink(trace_file)
_PrintMessage('done')
_PrintMessage('Trace written to %s' % os.path.abspath(result))
return result
def _CaptureAndPullTrace(controllers, interval, output, compress, write_html):
trace_type = ' + '.join(map(str, controllers))
try:
_StartTracing(controllers, interval)
if interval:
_PrintMessage('Capturing %d-second %s. Press Enter to stop early...' % \
(interval, trace_type), eol='')
_WaitForEnter(interval)
else:
_PrintMessage('Capturing %s. Press Enter to stop...' % trace_type, eol='')
raw_input()
finally:
_StopTracing(controllers)
if interval:
_PrintMessage('done')
return _PullTraces(controllers, output, compress, write_html)
def _ComputeChromeCategories(options):
categories = []
if options.trace_frame_viewer:
categories.append('disabled-by-default-cc.debug')
if options.trace_ubercompositor:
categories.append('disabled-by-default-cc.debug*')
if options.trace_gpu:
categories.append('disabled-by-default-gpu.debug*')
if options.trace_flow:
categories.append('disabled-by-default-toplevel.flow')
if options.chrome_categories:
categories += options.chrome_categories.split(',')
return categories
def _ComputeSystraceCategories(options):
if not options.systrace_categories:
return []
return options.systrace_categories.split(',')
def main():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from Android browsers. See http://dev.'
'chromium.org/developers/how-tos/trace-event-'
'profiling-tool for detailed instructions for '
'profiling.')
timed_options = optparse.OptionGroup(parser, 'Timed tracing')
timed_options.add_option('-t', '--time', help='Profile for N seconds and '
'download the resulting trace.', metavar='N',
type='float')
parser.add_option_group(timed_options)
cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
cont_options.add_option('--continuous', help='Profile continuously until '
'stopped.', action='store_true')
cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
'ring buffer and save its contents when stopping '
'instead of appending events into one long trace.',
action='store_true')
parser.add_option_group(cont_options)
categories = optparse.OptionGroup(parser, 'Trace categories')
categories.add_option('-c', '--categories', help='Select Chrome tracing '
'categories with comma-delimited wildcards, '
'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
'Chrome\'s default categories. Chrome tracing can be '
'disabled with "--categories=\'\'".',
metavar='CHROME_CATEGORIES', dest='chrome_categories',
default=_DEFAULT_CHROME_CATEGORIES)
categories.add_option('-s', '--systrace', help='Capture a systrace with the '
'chosen comma-delimited systrace categories. You can '
'also capture a combined Chrome + systrace by enabling '
'both types of categories. Use "list" to see the '
'available categories. Systrace is disabled by '
'default.', metavar='SYS_CATEGORIES',
dest='systrace_categories', default='')
categories.add_option('--trace-cc',
help='Deprecated, use --trace-frame-viewer.',
action='store_true')
categories.add_option('--trace-frame-viewer',
help='Enable enough trace categories for '
'compositor frame viewing.', action='store_true')
categories.add_option('--trace-ubercompositor',
help='Enable enough trace categories for '
'ubercompositor frame data.', action='store_true')
categories.add_option('--trace-gpu', help='Enable extra trace categories for '
'GPU data.', action='store_true')
categories.add_option('--trace-flow', help='Enable extra trace categories '
'for IPC message flows.', action='store_true')
parser.add_option_group(categories)
output_options = optparse.OptionGroup(parser, 'Output options')
output_options.add_option('-o', '--output', help='Save trace output to file.')
output_options.add_option('--html', help='Package trace into a standalone '
'html file.', action='store_true')
output_options.add_option('--view', help='Open resulting trace file in a '
'browser.', action='store_true')
parser.add_option_group(output_options)
browsers = sorted(_GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
options, args = parser.parse_args()
if options.trace_cc:
parser.parse_error("""--trace-cc is deprecated.
For basic jank busting uses, use --trace-frame-viewer
For detailed study of ubercompositor, pass --trace-ubercompositor.
When in doubt, just try out --trace-frame-viewer.
""")
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
adb = android_commands.AndroidCommands()
if options.systrace_categories in ['list', 'help']:
_PrintMessage('\n'.join(SystraceController.GetCategories(adb)))
return 0
if not options.time and not options.continuous:
_PrintMessage('Time interval or continuous tracing should be specified.')
return 1
chrome_categories = _ComputeChromeCategories(options)
systrace_categories = _ComputeSystraceCategories(options)
package_info = _GetSupportedBrowsers()[options.browser]
if chrome_categories and 'webview' in systrace_categories:
logging.warning('Using the "webview" category in systrace together with '
'Chrome tracing results in duplicate trace events.')
controllers = []
if chrome_categories:
controllers.append(ChromeTracingController(adb,
package_info,
chrome_categories,
options.ring_buffer))
if systrace_categories:
controllers.append(SystraceController(adb,
systrace_categories,
options.ring_buffer))
if not controllers:
_PrintMessage('No trace categories enabled.')
return 1
result = _CaptureAndPullTrace(controllers,
options.time if not options.continuous else 0,
options.output,
options.compress,
options.html)
if options.view:
webbrowser.open(result)
if __name__ == '__main__':
sys.exit(main())
|
|
import errno
import logging
import socket
import sys
import unittest
import time
import warnings
import mock
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .. import (
requires_network, onlyPy3, onlyPy26OrOlder,
TARPIT_HOST, VALID_SOURCE_ADDRESSES, INVALID_SOURCE_ADDRESSES,
)
from ..port_helpers import find_unused_port
from urllib3 import (
encode_multipart_formdata,
HTTPConnectionPool,
)
from urllib3.exceptions import (
ConnectTimeoutError,
EmptyPoolError,
DecodeError,
MaxRetryError,
ReadTimeoutError,
ProtocolError,
NewConnectionError,
)
from urllib3.packages.six import b, u
from urllib3.util.retry import Retry
from urllib3.util.timeout import Timeout
from dummyserver.testcase import HTTPDummyServerTestCase, SocketDummyServerTestCase
from dummyserver.server import NoIPv6Warning, HAS_IPV6_AND_DNS
from threading import Event
log = logging.getLogger('urllib3.connectionpool')
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
SHORT_TIMEOUT = 0.001
LONG_TIMEOUT = 0.01
def wait_for_socket(ready_event):
ready_event.wait()
ready_event.clear()
class TestConnectionPoolTimeouts(SocketDummyServerTestCase):
def test_timeout_float(self):
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
# Pool-global timeout
pool = HTTPConnectionPool(self.host, self.port, timeout=SHORT_TIMEOUT, retries=False)
wait_for_socket(ready_event)
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/')
block_event.set() # Release block
# Shouldn't raise this time
wait_for_socket(ready_event)
block_event.set() # Pre-release block
pool.request('GET', '/')
def test_conn_closed(self):
block_event = Event()
self.start_basic_handler(block_send=block_event, num=1)
pool = HTTPConnectionPool(self.host, self.port, timeout=SHORT_TIMEOUT, retries=False)
conn = pool._get_conn()
pool._put_conn(conn)
try:
pool.urlopen('GET', '/')
self.fail("The request should fail with a timeout error.")
except ReadTimeoutError:
if conn.sock:
self.assertRaises(socket.error, conn.sock.recv, 1024)
finally:
pool._put_conn(conn)
block_event.set()
def test_timeout(self):
# Requests should time out when expected
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=6)
# Pool-global timeout
timeout = Timeout(read=SHORT_TIMEOUT)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout, retries=False)
wait_for_socket(ready_event)
conn = pool._get_conn()
self.assertRaises(ReadTimeoutError, pool._make_request, conn, 'GET', '/')
pool._put_conn(conn)
block_event.set() # Release request
wait_for_socket(ready_event)
block_event.clear()
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/')
block_event.set() # Release request
# Request-specific timeouts should raise errors
pool = HTTPConnectionPool(self.host, self.port, timeout=LONG_TIMEOUT, retries=False)
conn = pool._get_conn()
wait_for_socket(ready_event)
now = time.time()
self.assertRaises(ReadTimeoutError, pool._make_request, conn, 'GET', '/', timeout=timeout)
delta = time.time() - now
block_event.set() # Release request
self.assertTrue(delta < LONG_TIMEOUT, "timeout was pool-level LONG_TIMEOUT rather than request-level SHORT_TIMEOUT")
pool._put_conn(conn)
wait_for_socket(ready_event)
now = time.time()
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/', timeout=timeout)
delta = time.time() - now
self.assertTrue(delta < LONG_TIMEOUT, "timeout was pool-level LONG_TIMEOUT rather than request-level SHORT_TIMEOUT")
block_event.set() # Release request
# Timeout int/float passed directly to request and _make_request should
# raise a request timeout
wait_for_socket(ready_event)
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/', timeout=SHORT_TIMEOUT)
block_event.set() # Release request
wait_for_socket(ready_event)
conn = pool._new_conn()
# FIXME: This assert flakes sometimes. Not sure why.
self.assertRaises(ReadTimeoutError, pool._make_request, conn, 'GET', '/', timeout=SHORT_TIMEOUT)
block_event.set() # Release request
def test_connect_timeout(self):
url = '/'
host, port = TARPIT_HOST, 80
timeout = Timeout(connect=SHORT_TIMEOUT)
# Pool-global timeout
pool = HTTPConnectionPool(host, port, timeout=timeout)
conn = pool._get_conn()
self.assertRaises(ConnectTimeoutError, pool._make_request, conn, 'GET', url)
# Retries
retries = Retry(connect=0)
self.assertRaises(MaxRetryError, pool.request, 'GET', url, retries=retries)
# Request-specific connection timeouts
big_timeout = Timeout(read=LONG_TIMEOUT, connect=LONG_TIMEOUT)
pool = HTTPConnectionPool(host, port, timeout=big_timeout, retries=False)
conn = pool._get_conn()
self.assertRaises(ConnectTimeoutError, pool._make_request, conn, 'GET', url, timeout=timeout)
pool._put_conn(conn)
self.assertRaises(ConnectTimeoutError, pool.request, 'GET', url, timeout=timeout)
def test_total_applies_connect(self):
host, port = TARPIT_HOST, 80
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
pool = HTTPConnectionPool(host, port, timeout=timeout)
conn = pool._get_conn()
self.assertRaises(ConnectTimeoutError, pool._make_request, conn, 'GET', '/')
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
pool = HTTPConnectionPool(host, port, timeout=timeout)
conn = pool._get_conn()
self.assertRaises(ConnectTimeoutError, pool._make_request, conn, 'GET', '/')
def test_total_timeout(self):
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
wait_for_socket(ready_event)
# This will get the socket to raise an EAGAIN on the read
timeout = Timeout(connect=3, read=SHORT_TIMEOUT)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout, retries=False)
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/')
block_event.set()
wait_for_socket(ready_event)
block_event.clear()
# The connect should succeed and this should hit the read timeout
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout, retries=False)
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/')
def test_create_connection_timeout(self):
timeout = Timeout(connect=SHORT_TIMEOUT, total=LONG_TIMEOUT)
pool = HTTPConnectionPool(TARPIT_HOST, self.port, timeout=timeout, retries=False)
conn = pool._new_conn()
self.assertRaises(ConnectTimeoutError, conn.connect)
class TestConnectionPool(HTTPDummyServerTestCase):
def setUp(self):
self.pool = HTTPConnectionPool(self.host, self.port)
def test_get(self):
r = self.pool.request('GET', '/specific_method',
fields={'method': 'GET'})
self.assertEqual(r.status, 200, r.data)
def test_post_url(self):
r = self.pool.request('POST', '/specific_method',
fields={'method': 'POST'})
self.assertEqual(r.status, 200, r.data)
def test_urlopen_put(self):
r = self.pool.urlopen('PUT', '/specific_method?method=PUT')
self.assertEqual(r.status, 200, r.data)
def test_wrong_specific_method(self):
# To make sure the dummy server is actually returning failed responses
r = self.pool.request('GET', '/specific_method',
fields={'method': 'POST'})
self.assertEqual(r.status, 400, r.data)
r = self.pool.request('POST', '/specific_method',
fields={'method': 'GET'})
self.assertEqual(r.status, 400, r.data)
def test_upload(self):
data = "I'm in ur multipart form-data, hazing a cheezburgr"
fields = {
'upload_param': 'filefield',
'upload_filename': 'lolcat.txt',
'upload_size': len(data),
'filefield': ('lolcat.txt', data),
}
r = self.pool.request('POST', '/upload', fields=fields)
self.assertEqual(r.status, 200, r.data)
def test_one_name_multiple_values(self):
fields = [
('foo', 'a'),
('foo', 'b'),
]
# urlencode
r = self.pool.request('GET', '/echo', fields=fields)
self.assertEqual(r.data, b'foo=a&foo=b')
# multipart
r = self.pool.request('POST', '/echo', fields=fields)
self.assertEqual(r.data.count(b'name="foo"'), 2)
def test_request_method_body(self):
body = b'hi'
r = self.pool.request('POST', '/echo', body=body)
self.assertEqual(r.data, body)
fields = [('hi', 'hello')]
self.assertRaises(TypeError, self.pool.request, 'POST', '/echo', body=body, fields=fields)
def test_unicode_upload(self):
fieldname = u('myfile')
filename = u('\xe2\x99\xa5.txt')
data = u('\xe2\x99\xa5').encode('utf8')
size = len(data)
fields = {
u('upload_param'): fieldname,
u('upload_filename'): filename,
u('upload_size'): size,
fieldname: (filename, data),
}
r = self.pool.request('POST', '/upload', fields=fields)
self.assertEqual(r.status, 200, r.data)
def test_nagle(self):
""" Test that connections have TCP_NODELAY turned on """
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
pool = HTTPConnectionPool(self.host, self.port)
conn = pool._get_conn()
pool._make_request(conn, 'GET', '/')
tcp_nodelay_setting = conn.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(tcp_nodelay_setting)
def test_socket_options(self):
"""Test that connections accept socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
pool = HTTPConnectionPool(self.host, self.port, socket_options=[
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
])
s = pool._new_conn()._new_conn() # Get the socket
using_keepalive = s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
self.assertTrue(using_keepalive)
s.close()
def test_disable_default_socket_options(self):
"""Test that passing None disables all socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
pool = HTTPConnectionPool(self.host, self.port, socket_options=None)
s = pool._new_conn()._new_conn()
using_nagle = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 0
self.assertTrue(using_nagle)
s.close()
def test_defaults_are_applied(self):
"""Test that modifying the default socket options works."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
pool = HTTPConnectionPool(self.host, self.port)
# Get the HTTPConnection instance
conn = pool._new_conn()
# Update the default socket options
conn.default_socket_options += [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
s = conn._new_conn()
nagle_disabled = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) > 0
using_keepalive = s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
self.assertTrue(nagle_disabled)
self.assertTrue(using_keepalive)
def test_connection_error_retries(self):
""" ECONNREFUSED error should raise a connection error, with retries """
port = find_unused_port()
pool = HTTPConnectionPool(self.host, port)
try:
pool.request('GET', '/', retries=Retry(connect=3))
self.fail("Should have failed with a connection error.")
except MaxRetryError as e:
self.assertEqual(type(e.reason), NewConnectionError)
def test_timeout_success(self):
timeout = Timeout(connect=3, read=5, total=None)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
pool.request('GET', '/')
# This should not raise a "Timeout already started" error
pool.request('GET', '/')
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
# This should also not raise a "Timeout already started" error
pool.request('GET', '/')
timeout = Timeout(total=None)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
pool.request('GET', '/')
def test_tunnel(self):
# note the actual httplib.py has no tests for this functionality
timeout = Timeout(total=None)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
conn = pool._get_conn()
try:
conn.set_tunnel(self.host, self.port)
except AttributeError: # python 2.6
conn._set_tunnel(self.host, self.port)
conn._tunnel = mock.Mock(return_value=None)
pool._make_request(conn, 'GET', '/')
conn._tunnel.assert_called_once_with()
# test that it's not called when tunnel is not set
timeout = Timeout(total=None)
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
conn = pool._get_conn()
conn._tunnel = mock.Mock(return_value=None)
pool._make_request(conn, 'GET', '/')
self.assertEqual(conn._tunnel.called, False)
def test_redirect(self):
r = self.pool.request('GET', '/redirect', fields={'target': '/'}, redirect=False)
self.assertEqual(r.status, 303)
r = self.pool.request('GET', '/redirect', fields={'target': '/'})
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_bad_connect(self):
pool = HTTPConnectionPool('badhost.invalid', self.port)
try:
pool.request('GET', '/', retries=5)
self.fail("should raise timeout exception here")
except MaxRetryError as e:
self.assertEqual(type(e.reason), NewConnectionError)
def test_keepalive(self):
pool = HTTPConnectionPool(self.host, self.port, block=True, maxsize=1)
r = pool.request('GET', '/keepalive?close=0')
r = pool.request('GET', '/keepalive?close=0')
self.assertEqual(r.status, 200)
self.assertEqual(pool.num_connections, 1)
self.assertEqual(pool.num_requests, 2)
def test_keepalive_close(self):
pool = HTTPConnectionPool(self.host, self.port,
block=True, maxsize=1, timeout=2)
r = pool.request('GET', '/keepalive?close=1', retries=0,
headers={
"Connection": "close",
})
self.assertEqual(pool.num_connections, 1)
# The dummyserver will have responded with Connection:close,
# and httplib will properly cleanup the socket.
# We grab the HTTPConnection object straight from the Queue,
# because _get_conn() is where the check & reset occurs
# pylint: disable-msg=W0212
conn = pool.pool.get()
self.assertEqual(conn.sock, None)
pool._put_conn(conn)
# Now with keep-alive
r = pool.request('GET', '/keepalive?close=0', retries=0,
headers={
"Connection": "keep-alive",
})
# The dummyserver responded with Connection:keep-alive, the connection
# persists.
conn = pool.pool.get()
self.assertNotEqual(conn.sock, None)
pool._put_conn(conn)
# Another request asking the server to close the connection. This one
# should get cleaned up for the next request.
r = pool.request('GET', '/keepalive?close=1', retries=0,
headers={
"Connection": "close",
})
self.assertEqual(r.status, 200)
conn = pool.pool.get()
self.assertEqual(conn.sock, None)
pool._put_conn(conn)
# Next request
r = pool.request('GET', '/keepalive?close=0')
def test_post_with_urlencode(self):
data = {'banana': 'hammock', 'lol': 'cat'}
r = self.pool.request('POST', '/echo', fields=data, encode_multipart=False)
self.assertEqual(r.data.decode('utf-8'), urlencode(data))
def test_post_with_multipart(self):
data = {'banana': 'hammock', 'lol': 'cat'}
r = self.pool.request('POST', '/echo',
fields=data,
encode_multipart=True)
body = r.data.split(b'\r\n')
encoded_data = encode_multipart_formdata(data)[0]
expected_body = encoded_data.split(b'\r\n')
# TODO: Get rid of extra parsing stuff when you can specify
# a custom boundary to encode_multipart_formdata
"""
We need to loop the return lines because a timestamp is attached
from within encode_multipart_formdata. When the server echos back
the data, it has the timestamp from when the data was encoded, which
is not equivalent to when we run encode_multipart_formdata on
the data again.
"""
for i, line in enumerate(body):
if line.startswith(b'--'):
continue
self.assertEqual(body[i], expected_body[i])
def test_check_gzip(self):
r = self.pool.request('GET', '/encodingrequest',
headers={'accept-encoding': 'gzip'})
self.assertEqual(r.headers.get('content-encoding'), 'gzip')
self.assertEqual(r.data, b'hello, world!')
def test_check_deflate(self):
r = self.pool.request('GET', '/encodingrequest',
headers={'accept-encoding': 'deflate'})
self.assertEqual(r.headers.get('content-encoding'), 'deflate')
self.assertEqual(r.data, b'hello, world!')
def test_bad_decode(self):
self.assertRaises(DecodeError, self.pool.request,
'GET', '/encodingrequest',
headers={'accept-encoding': 'garbage-deflate'})
self.assertRaises(DecodeError, self.pool.request,
'GET', '/encodingrequest',
headers={'accept-encoding': 'garbage-gzip'})
def test_connection_count(self):
pool = HTTPConnectionPool(self.host, self.port, maxsize=1)
pool.request('GET', '/')
pool.request('GET', '/')
pool.request('GET', '/')
self.assertEqual(pool.num_connections, 1)
self.assertEqual(pool.num_requests, 3)
def test_connection_count_bigpool(self):
http_pool = HTTPConnectionPool(self.host, self.port, maxsize=16)
http_pool.request('GET', '/')
http_pool.request('GET', '/')
http_pool.request('GET', '/')
self.assertEqual(http_pool.num_connections, 1)
self.assertEqual(http_pool.num_requests, 3)
def test_partial_response(self):
pool = HTTPConnectionPool(self.host, self.port, maxsize=1)
req_data = {'lol': 'cat'}
resp_data = urlencode(req_data).encode('utf-8')
r = pool.request('GET', '/echo', fields=req_data, preload_content=False)
self.assertEqual(r.read(5), resp_data[:5])
self.assertEqual(r.read(), resp_data[5:])
def test_lazy_load_twice(self):
# This test is sad and confusing. Need to figure out what's
# going on with partial reads and socket reuse.
pool = HTTPConnectionPool(self.host, self.port, block=True, maxsize=1, timeout=2)
payload_size = 1024 * 2
first_chunk = 512
boundary = 'foo'
req_data = {'count': 'a' * payload_size}
resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]
req2_data = {'count': 'b' * payload_size}
resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]
r1 = pool.request('POST', '/echo', fields=req_data, multipart_boundary=boundary, preload_content=False)
self.assertEqual(r1.read(first_chunk), resp_data[:first_chunk])
try:
r2 = pool.request('POST', '/echo', fields=req2_data, multipart_boundary=boundary,
preload_content=False, pool_timeout=0.001)
# This branch should generally bail here, but maybe someday it will
# work? Perhaps by some sort of magic. Consider it a TODO.
self.assertEqual(r2.read(first_chunk), resp2_data[:first_chunk])
self.assertEqual(r1.read(), resp_data[first_chunk:])
self.assertEqual(r2.read(), resp2_data[first_chunk:])
self.assertEqual(pool.num_requests, 2)
except EmptyPoolError:
self.assertEqual(r1.read(), resp_data[first_chunk:])
self.assertEqual(pool.num_requests, 1)
self.assertEqual(pool.num_connections, 1)
def test_for_double_release(self):
        MAXSIZE = 5
# Check default state
pool = HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE)
self.assertEqual(pool.num_connections, 0)
self.assertEqual(pool.pool.qsize(), MAXSIZE)
# Make an empty slot for testing
pool.pool.get()
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
# Check state after simple request
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
# Check state without release
pool.urlopen('GET', '/', preload_content=False)
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
# Check state after read
pool.urlopen('GET', '/').data
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
def test_release_conn_parameter(self):
        MAXSIZE = 5
pool = HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE)
self.assertEqual(pool.pool.qsize(), MAXSIZE)
# Make request without releasing connection
pool.request('GET', '/', release_conn=False, preload_content=False)
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
def test_dns_error(self):
pool = HTTPConnectionPool('thishostdoesnotexist.invalid', self.port, timeout=0.001)
self.assertRaises(MaxRetryError, pool.request, 'GET', '/test', retries=2)
def test_source_address(self):
for addr, is_ipv6 in VALID_SOURCE_ADDRESSES:
if is_ipv6 and not HAS_IPV6_AND_DNS:
warnings.warn("No IPv6 support: skipping.",
NoIPv6Warning)
continue
pool = HTTPConnectionPool(self.host, self.port,
source_address=addr, retries=False)
r = pool.request('GET', '/source_address')
self.assertEqual(r.data, b(addr[0]))
def test_source_address_error(self):
for addr in INVALID_SOURCE_ADDRESSES:
pool = HTTPConnectionPool(self.host, self.port, source_address=addr, retries=False)
# FIXME: This assert flakes sometimes. Not sure why.
self.assertRaises(NewConnectionError, pool.request, 'GET', '/source_address?{0}'.format(addr))
def test_stream_keepalive(self):
x = 2
for _ in range(x):
response = self.pool.request(
'GET',
'/chunked',
headers={
'Connection': 'keep-alive',
},
preload_content=False,
retries=False,
)
for chunk in response.stream():
self.assertEqual(chunk, b'123')
self.assertEqual(self.pool.num_connections, 1)
self.assertEqual(self.pool.num_requests, x)
def test_chunked_gzip(self):
response = self.pool.request(
'GET',
'/chunked_gzip',
preload_content=False,
decode_content=True,
)
self.assertEqual(b'123' * 4, response.read())
def test_cleanup_on_connection_error(self):
'''
Test that connections are recycled to the pool on
connection errors where no http response is received.
'''
poolsize = 3
with HTTPConnectionPool(self.host, self.port, maxsize=poolsize, block=True) as http:
self.assertEqual(http.pool.qsize(), poolsize)
            # Exhaust the retry budget on a redirect (retries=0) so that
            # MaxRetryError is raised and no HTTP response is handed back to
            # the caller; the pool itself must recycle the connection.
self.assertRaises(MaxRetryError,
http.request, 'GET', '/redirect', fields={'target': '/'}, release_conn=False, retries=0)
r = http.request('GET', '/redirect', fields={'target': '/'}, release_conn=False, retries=1)
r.release_conn()
# the pool should still contain poolsize elements
self.assertEqual(http.pool.qsize(), http.pool.maxsize)
class TestRetry(HTTPDummyServerTestCase):
def setUp(self):
self.pool = HTTPConnectionPool(self.host, self.port)
def test_max_retry(self):
try:
r = self.pool.request('GET', '/redirect',
fields={'target': '/'},
retries=0)
self.fail("Failed to raise MaxRetryError exception, returned %r" % r.status)
except MaxRetryError:
pass
def test_disabled_retry(self):
""" Disabled retries should disable redirect handling. """
r = self.pool.request('GET', '/redirect',
fields={'target': '/'},
retries=False)
self.assertEqual(r.status, 303)
r = self.pool.request('GET', '/redirect',
fields={'target': '/'},
retries=Retry(redirect=False))
self.assertEqual(r.status, 303)
pool = HTTPConnectionPool('thishostdoesnotexist.invalid', self.port, timeout=0.001)
self.assertRaises(NewConnectionError, pool.request, 'GET', '/test', retries=False)
def test_read_retries(self):
""" Should retry for status codes in the whitelist """
retry = Retry(read=1, status_forcelist=[418])
resp = self.pool.request('GET', '/successful_retry',
headers={'test-name': 'test_read_retries'},
retries=retry)
self.assertEqual(resp.status, 200)
def test_read_total_retries(self):
""" HTTP response w/ status code in the whitelist should be retried """
headers = {'test-name': 'test_read_total_retries'}
retry = Retry(total=1, status_forcelist=[418])
resp = self.pool.request('GET', '/successful_retry',
headers=headers, retries=retry)
self.assertEqual(resp.status, 200)
def test_retries_wrong_whitelist(self):
"""HTTP response w/ status code not in whitelist shouldn't be retried"""
retry = Retry(total=1, status_forcelist=[202])
resp = self.pool.request('GET', '/successful_retry',
headers={'test-name': 'test_wrong_whitelist'},
retries=retry)
self.assertEqual(resp.status, 418)
def test_default_method_whitelist_retried(self):
""" urllib3 should retry methods in the default method whitelist """
retry = Retry(total=1, status_forcelist=[418])
resp = self.pool.request('OPTIONS', '/successful_retry',
headers={'test-name': 'test_default_whitelist'},
retries=retry)
self.assertEqual(resp.status, 200)
def test_retries_wrong_method_list(self):
"""Method not in our whitelist should not be retried, even if code matches"""
headers = {'test-name': 'test_wrong_method_whitelist'}
retry = Retry(total=1, status_forcelist=[418],
method_whitelist=['POST'])
resp = self.pool.request('GET', '/successful_retry',
headers=headers, retries=retry)
self.assertEqual(resp.status, 418)
def test_read_retries_unsuccessful(self):
headers = {'test-name': 'test_read_retries_unsuccessful'}
resp = self.pool.request('GET', '/successful_retry',
headers=headers, retries=1)
self.assertEqual(resp.status, 418)
def test_retry_reuse_safe(self):
""" It should be possible to reuse a Retry object across requests """
headers = {'test-name': 'test_retry_safe'}
retry = Retry(total=1, status_forcelist=[418])
resp = self.pool.request('GET', '/successful_retry',
headers=headers, retries=retry)
self.assertEqual(resp.status, 200)
resp = self.pool.request('GET', '/successful_retry',
headers=headers, retries=retry)
self.assertEqual(resp.status, 200)
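# --- Editor's illustrative sketch (not part of the original test suite) ---
# test_retry_reuse_safe above relies on Retry objects being effectively
# immutable: each attempt derives a fresh Retry via Retry.increment() rather
# than mutating the object the caller passed in.  A minimal stand-alone
# illustration, assuming the urllib3 Retry API exercised by these tests:
def _retry_reuse_sketch():
    from urllib3.util.retry import Retry
    retry = Retry(total=2, status_forcelist=[418])
    # increment() returns a *new* Retry with one fewer attempt left; the
    # original object is unchanged and can safely back another request.
    reduced = retry.increment(method='GET', url='/successful_retry')
    assert retry.total == 2
    assert reduced.total == 1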
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import json
import tempfile
import time
import os
import os.path
import pytest
import subprocess
import uuid
# Propagation delays for Device Registration API resource creation.
DEVICE_MODEL_PROPAGATION_DELAY_S = 30
DEVICE_INSTANCE_PROPAGATION_DELAY_S = 60
PROJECT_ID = os.environ.get('PROJECT_ID', 'dummy-project-id')
@pytest.fixture(scope='session')
def device_model():
device_model_id = 'assistant-sdk-test-model-%s' % str(uuid.uuid1())
subprocess.check_call(['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'register-model', '--model', device_model_id,
'--type', 'LIGHT',
'--trait', 'action.devices.traits.OnOff',
'--manufacturer', 'assistant-sdk-test',
'--product-name', 'assistant-sdk-test'])
# Wait for model registration to be consistent
# on the Device Registration API.
time.sleep(DEVICE_MODEL_PROPAGATION_DELAY_S)
yield device_model_id
subprocess.check_call(['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'delete', '--model', device_model_id])
@pytest.fixture(scope='session')
def device_instance(device_model):
device_instance_id = 'assistant-sdk-test-device-%s' % str(uuid.uuid1())
subprocess.check_call(['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'register-device', '--model', device_model,
'--client-type', 'SERVICE',
'--device', device_instance_id])
# Wait for device registration to be consistent
# on the Device Registration API.
time.sleep(DEVICE_INSTANCE_PROPAGATION_DELAY_S)
yield device_instance_id
subprocess.check_call(['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'delete', '--device', device_instance_id])
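# Editor's note (a summary of the fixture pattern above, not new behaviour):
# both fixtures are session-scoped and share one lifecycle: register a
# uniquely named (uuid-suffixed) resource, sleep until the Device
# Registration API is consistent, yield the id to the tests, then delete
# the resource on teardown, so each test session runs against fresh model
# and device registrations.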
def test_endtoend_pushtotalk():
temp_dir = tempfile.mkdtemp()
audio_out_file = os.path.join(temp_dir, 'out.raw')
out = subprocess.check_output(['python', '-m',
'googlesamples.assistant.grpc.pushtotalk',
'--verbose',
'--device-model-id', 'test-device-model',
'--device-id', 'test-device',
'-i', 'tests/data/whattimeisit.riff',
'-o', audio_out_file],
stderr=subprocess.STDOUT)
print(out)
assert 'what time is it' in builtins.str(out).lower()
assert os.path.getsize(audio_out_file) > 0
def test_endtoend_pushtotalk_htmloutput(device_model, device_instance):
temp_dir = tempfile.mkdtemp()
audio_out_file = os.path.join(temp_dir, 'out.raw')
env = os.environ.copy()
env['TMPDIR'] = temp_dir
out = subprocess.check_output(['python', '-m',
'googlesamples.assistant.grpc.pushtotalk',
'--verbose',
'--device-model-id', device_model,
'--device-id', device_instance,
'-i', 'tests/data/grapefruit.riff',
'--display',
'-o', audio_out_file],
stderr=subprocess.STDOUT, env=env)
print(out)
assert 'grapefruit' in builtins.str(out).lower()
assert os.path.getsize(audio_out_file) > 0
files = [os.path.join(path, f)
for path, _, fs in os.walk(temp_dir) for f in fs]
assert len(files) > 0
screen_out = None
for f in files:
if os.path.basename(f) == 'google-assistant-sdk-screen-out.html':
screen_out = f
assert screen_out is not None
with open(screen_out, 'r') as f:
assert 'pamplemousse' in f.read()
def test_registration_pushtotalk(device_model):
temp_dir = tempfile.mkdtemp()
audio_out_file = os.path.join(temp_dir, 'out.raw')
    # Use a non-existent device config file intentionally
    # to force device registration.
device_config = os.path.join(temp_dir, 'device_config.json')
out = subprocess.check_output(['python', '-m',
'googlesamples.assistant.grpc.pushtotalk',
'--verbose',
'--project-id', PROJECT_ID,
'--device-model-id', device_model,
'--device-config', device_config,
'-i', 'tests/data/whattimeisit.riff',
'-o', audio_out_file],
stderr=subprocess.STDOUT)
assert 'what time is it' in builtins.str(out).lower()
assert os.path.getsize(audio_out_file) > 0
with open(device_config) as f:
config = json.load(f)
assert ('device registered: %s' % config['id']
in builtins.str(out).lower())
out = subprocess.check_output(
['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'get', '--device', config['id']],
stderr=subprocess.STDOUT
)
print(out)
assert ('device instance id: %s' % config['id']
in builtins.str(out).lower())
subprocess.check_call(['python', '-m',
'googlesamples.assistant.grpc.devicetool',
'--project-id', PROJECT_ID,
'delete', '--device', config['id']])
def test_endtoend_textinput(device_model, device_instance):
p = subprocess.Popen(['python', '-m',
'googlesamples.assistant.grpc.textinput',
'--verbose',
'--device-model-id', device_model,
'--device-id', device_instance],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate(b'how do you say grapefruit in French?')
print(out)
out = builtins.str(out).lower()
assert err is None
assert 'grapefruit' in out
assert 'pamplemousse' in out
def test_endtoend_textinput_htmloutput(device_model, device_instance):
temp_dir = tempfile.mkdtemp()
env = os.environ.copy()
env['TMPDIR'] = temp_dir
p = subprocess.Popen(['python', '-m',
'googlesamples.assistant.grpc.textinput',
'--verbose',
'--device-model-id', device_model,
'--device-id', device_instance,
'--display'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env)
out, err = p.communicate(b'how do you say grapefruit in French?')
print(out)
out = builtins.str(out).lower()
assert err is None
assert 'grapefruit' in out
files = [os.path.join(path, f)
for path, _, fs in os.walk(temp_dir) for f in fs]
assert len(files) == 1
assert os.path.basename(files[0]) == 'google-assistant-sdk-screen-out.html'
with open(files[0], 'r') as f:
assert 'pamplemousse' in f.read()
def test_endtoend_audiofileinput(device_model, device_instance):
temp_dir = tempfile.mkdtemp()
audio_out_file = os.path.join(temp_dir, 'out.raw')
out = subprocess.check_output(
['python', '-m',
'googlesamples.assistant.grpc.audiofileinput',
'--verbose',
'--device-model-id', device_model,
'--device-id', device_instance,
'-i', 'tests/data/whattimeisit.riff',
'-o', audio_out_file],
stderr=subprocess.STDOUT)
print(out)
assert 'what time is it' in builtins.str(out).lower()
assert os.path.getsize(audio_out_file) > 0
|
|
from django.http import HttpResponse, JsonResponse
from django.views import generic
from django.conf.urls.static import static
import requests
from requests import get, post, put
import json
import datetime
class IndexView(generic.TemplateView):
# Main index.html.
template_name = 'animals/index.html'
def EOLforId(request, taxa_name):
'''
Get Encyclopedia of life ID match given a taxa scientific name.
Separated from modal info call in case there is no match.
Method argument: string scientific taxa name.
'''
taxa_id = 0
    try:
        r = get('http://eol.org/api/search/1.0.json?q=' + taxa_name + '&page=1&exact=true&filter_by_taxon_concept_id=&filter_by_hierarchy_entry_id=&filter_by_string=&cache_ttl=&key=1ded9f1eb184c38df6511aef3ba552a11b96e4c9')
        r = r.json()
    except requests.exceptions.RequestException:
        # Network failure: fall back to the sentinel ID of 0, wrapped in a
        # response like the success path below.
        return HttpResponse(taxa_id)
    if r.get("results"):
        taxa_id = r["results"][0]["id"]
return HttpResponse(taxa_id)
def EOLforModalInfo(request, taxa_id):
'''
Get and parse Encyclopedia of Life info for a given taxa ID.
Check all photos returned.
Used to populate species cards and modals- this is a lot of info.
Method Argument: EOL taxa ID obtained from EOLforId.
'''
info = {"textStuff": [], "links": [], "pictures": [], "commonName": "", "scientificName": ""}
try:
r = get('http://eol.org/api/pages/1.0.json?batch=false&id=' + taxa_id + '&images_per_page=10&images_page=1&videos_per_page=0&videos_page=0&sounds_per_page=0&sounds_page=0&maps_per_page=0&maps_page=0&texts_per_page=2&texts_page=1&iucn=false&subjects=overview&licenses=all&details=true&common_names=true&synonyms=true&references=false&taxonomy=false&vetted=0&cache_ttl=&language=en&key=1ded9f1eb184c38df6511aef3ba552a11b96e4c9')
r = r.json()
except requests.exceptions.RequestException:
return JsonResponse(info)
finally:
pass
# Assign scientific name.
info["scientificName"] = r["scientificName"]
# Assign common name based on EOL english preferred option.
for name in r["vernacularNames"]:
if name["language"] == "en":
            if name.get("eol_preferred"):
                info["commonName"] = name["vernacularName"]
for data in r["dataObjects"]:
        # Add image data to 'pictures'. TODO: fetch these images concurrently.
try:
if data["mimeType"] == "image/jpeg":
p = get(data["mediaURL"])
if p.status_code == 200:
info["pictures"].append(data["mediaURL"])
else:
# Add source links.
info["links"].append(data["source"])
# Add text data to 'textStuff'.
if data["mimeType"] == "text/plain":
info["textStuff"].append(data["description"])
elif data["mimeType"] == "text/html":
info["textStuff"].append(data["description"])
except:
pass
return JsonResponse(info)
def loadTaxaObject(taxa_name):
'''
Creates/populates a 'current taxa' object with lists of subtaxa and supertaxa, given a scientific name.
Mimics structure of Catalog of life database object.
Checks Firebase database first, but if info is missing, this function gets it from the Catalog of Life API
and 'put's it into Firebase as an automigration.
Method Argument: string scientific taxa name.
'''
taxa = {"childtaxa": [], "supertaxa": [], "rank": "", "name": taxa_name, "question": ""}
# Get firebase data on selected taxa.
try:
f = get("https://animal-identification.firebaseio.com/specialData/" + taxa_name + "/.json")
f = f.json()
except:
f = {}
finally:
pass
try:
taxa["question"] = f["question"]
except:
taxa["question"] = ""
finally:
pass
try:
taxa["rank"] = f["rank"]
except:
pass
finally:
pass
try:
taxa["childtaxa"] = f["childtaxa"]
print(f["childtaxa"])
except:
# if the COL data hasn't already been added to the taxa in Firebase, get it from COL.
try:
r = get("http://www.catalogueoflife.org/col/webservice?name=" + taxa_name + "&format=json&response=full")
r=r.json()
r = r["results"][0]
except:
r= {}
finally:
pass
try:
taxa["rank"] = r["rank"]
except:
taxa["rank"] = ""
finally:
pass
try:
for child in r["child_taxa"]:
to_add = {"name": child["name"]}
taxa["childtaxa"].append(to_add)
except:
pass
finally:
pass
finally:
pass
try:
taxa["supertaxa"] = f["supertaxa"]
except:
try:
r = get("http://www.catalogueoflife.org/col/webservice?name=" + taxa_name + "&format=json&response=full")
r=r.json()
r = r["results"][0]
except:
r={}
finally:
pass
try:
for parent in r["classification"]:
to_add = {"name": parent["name"]}
taxa["supertaxa"].append(to_add)
except:
pass
finally:
pass
finally:
pass
# patch the taxa object back to firebase.
putter = put("https://animal-identification.firebaseio.com/specialData/" + taxa_name + "/.json", json=taxa)
return taxa
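# --- Editor's illustrative sketch (not part of the original app) ---
# loadTaxaObject() above is essentially a read-through cache over Firebase's
# REST interface: GET <path>/.json, fall back to the upstream Catalog of Life
# API on a miss, then PUT the merged record back so the next call is served
# from Firebase.  A minimal generic version of that pattern (the helper name
# and the `fallback` callable are hypothetical):
def _read_through_cache_sketch(taxa_name, fallback):
    base = "https://animal-identification.firebaseio.com/specialData/"
    cached = get(base + taxa_name + "/.json").json()
    if cached:
        return cached
    fresh = fallback(taxa_name)                    # e.g. a Catalog of Life lookup
    put(base + taxa_name + "/.json", json=fresh)   # automigrate for next time
    return fresh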
def loadTree(response, taxa_name):
'''
Loads a 'current taxa' object with the names, questions, and firebase status of all subtaxa and supertaxa, via loadTaxaObject function. Once migration is complete, I will combine this with loadTaxaObject.
Method Argument: string scientific name of taxa.
'''
taxa = loadTaxaObject(taxa_name)
# Get the 'question' data for each subtaxa and supertaxa.
for child in taxa["childtaxa"]:
c = get("https://animal-identification.firebaseio.com/specialData/" + child["name"] + "/.json")
c = c.json()
try:
child["question"] = c["question"]
except:
child["question"] = ""
finally:
pass
for parent in taxa["supertaxa"]:
p = get("https://animal-identification.firebaseio.com/specialData/" + parent["name"] + "/.json")
p = p.json()
try:
parent["question"] = p["question"]
except:
child["question"] = ""
finally:
pass
return JsonResponse(taxa)
def publishAnimal(response, taxa_name):
'''
    Publishes the user image to the Firebase 'feed' table with the current
    datetime and the identified taxa.
    Note that this app only has one database URL for the current user image,
    so if multiple people use it at once the uploads will collide.
    On the V2 to-do list: add users or sessioning/saving in the Django database.
Method Argument: String scientific name of taxa.
'''
userInfo = get("https://animal-identification.firebaseio.com/currentUserObject.json")
userInfo = userInfo.json()
    thenow = str(datetime.datetime.now())
    print(thenow)
    feedObj = {"name": taxa_name, "picture": userInfo["url"], "date": thenow}
    p = post("https://animal-identification.firebaseio.com/feed/.json", json=feedObj)
return HttpResponse("/")
|
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, re
import platform
import tinctest
from tinctest.lib import run_shell_command, local_path
from tinctest import logger
class Gppkg:
DEFAULT_BUILD_PROD_URL = "http://artifacts-cache.ci.eng.pivotal.io/dist/GPDB"
GPPKG_URL = os.environ.get('GPPKG_RC_URL',None)
def check_pkg_exists(self, pkgname):
cmd = 'gppkg -q --all'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'run gppkg', res)
logger.debug(res['stdout'])
pkgs = res['stdout'].strip().split('\n')[1:]
for pkg in pkgs:
if pkgname in pkg:
return (True, pkg)
return (False, None)
def run_gppkg_uninstall(self, pkgname):
"""
@summary: Runs gppkg -r to uninstall a gppkg. Output is written to gppkg_r.log file in current directory.
        @param pkgname: The name of the package to uninstall
@raise GppkgUtilError: If gppkg uninstall fails
"""
(existed, pkg) = self.check_pkg_exists(pkgname)
if not existed:
logger.info('the package does not exist, no need to remove, %s'%pkgname)
return True
logger.debug( '\nGppkgUtil: Uninstalling gppkg using gppkg file: %s' % (pkg))
cmd = 'gppkg -r %s' % pkg
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'run gppkg', res)
logger.debug(res)
if res['rc']> 0:
logger.info('Failed to Uninstall the package, %s' % pkgname)
return False
else:
return True
def run_gppkg_install(self, pkgfile):
"""
@summary: Runs gppkg -i to install a gppkg. Output is written to gppkg_i.log file in current directory.
@param pkgdir: The directory containing the gppkg file
@param pkgfile: The name of the .gppkg file in pkgdir
@raise GppkgUtilError: If gppkg install fails or if pkgfile specified does not exist
"""
if os.path.isfile(pkgfile):
logger.debug( '\nGppkgUtil: Installing gppkg using gppkg file: %s' % (pkgfile))
cmd = 'gppkg -i %s' % pkgfile
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'run gppkg', res)
logger.debug(res)
if res['rc']> 0:
tinctest.logger.info('result from install package %s' % res['stdout'])
raise Exception('Failed to install the package')
self.check_and_install_sql(res['stdout'])
else:
raise Exception("*** ERROR: .gppkg file not found '. Make sure %s exists." % (pkgfile))
def check_and_install_sql(self, output = None):
lines = output.strip().split('\n')
res = {'rc':0, 'stderr':'', 'stdout':''}
for line in lines:
if 'Please run psql -d mydatabase -f $GPHOME' in line:
sql_path = os.environ.get('GPHOME') + line.split('Please run psql -d mydatabase -f $GPHOME')[1].split(' ')[0]
run_shell_command('psql -d %s -f %s' % (os.environ.get('PGDATABASE', 'gptest'), sql_path), 'run sql to build functions for the package', res)
tinctest.logger.info('running sql file %s, result is %s' % (sql_path, res['stdout']))
break
def download_pkg(self, product_version, gppkg):
"""
Download gppkg from artifacts server.
"""
target_dir = local_path('download/')
if not os.path.exists(target_dir):
os.makedirs(target_dir)
(rc, download_link, package_name) = self.get_download_url_from_build_prod(product_version, gppkg)
if rc != 0:
return (-1, None, None)
wget_cmd = 'wget --html-extension %s -O %s`basename %s`' % (download_link, target_dir, download_link)
logger.debug('Download link: %s' % wget_cmd)
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (wget_cmd, 'run wget', res)
if res['rc'] > 0:
raise Exception("Gppkg download failed")
return (0, target_dir, package_name)
def gppkg_install(self, product_version, gppkg):
(existed, _) = self.check_pkg_exists(gppkg)
if existed:
return True
(rc, pkg_dir, pkg_name) = self.download_pkg(product_version, gppkg)
if rc != 0:
return False
pkgfile = local_path(pkg_dir + pkg_name)
self.run_gppkg_install(pkgfile)
run_shell_command('gpstop -air')
return True
def get_download_url_from_build_prod(self, product_version, gppkg):
        # defaults to 4.2
gpdb_version = '4.2'
if product_version.startswith('4.3'):
gpdb_version = '4.3'
orca = ""
try:
minor_version = float(re.compile('.*\d+\.\d+\.(\d+\.\d+)').match(product_version).group(1))
            if minor_version >= 5.0:  # Orca was introduced in GPDB 4.3.5.0
orca = 'orca'
except Exception as e:
logger.error("%s" % str(e))
raise Exception('Unable to parse product_version: %s' % product_version)
os_, platform_ = self.get_os_platform()
        compatible = self.check_os_compatibility(os_, gppkg)
        if not compatible:
            logger.error("the package %s is not compatible with the os %s, please make sure a compatible package exists" % (gppkg, os_))
            return (-1, None, None)
build_prod_host = self.DEFAULT_BUILD_PROD_URL
gppkg_config = self.getconfig(product_version=gpdb_version, gppkg=gppkg)
gppkg_config['pkg'] = gppkg
gppkg_config['gpdbversion'] = gpdb_version
gppkg_config['os'] = self.failover_gppkg_to_os_version(os_, gppkg)
gppkg_config['platform'] = platform_
gppkg_config['type'] = 'gppkg'
gppkg_config['orca'] = orca
        # GPDB 4.2 and 4.3 use different naming formats for gppkg files
if 'gpdbversion' in gppkg_config and 'ossversion' in gppkg_config:
gppkg_name = "%(pkg)s-ossv%(ossversion)s_pv%(version)s_gpdb%(gpdbversion)s%(orca)s-%(os)s-%(platform)s.%(type)s" % gppkg_config
elif gpdb_version == '4.3':
gppkg_name = "%(pkg)s-pv%(version)s_gpdb%(gpdbversion)s%(orca)s-%(os)s-%(platform)s.%(type)s" % gppkg_config
else:
gppkg_name = "%(pkg)s-%(version)s-%(os)s-%(platform)s.%(type)s" % gppkg_config
download_url = build_prod_host + '/gppkg/%(pkg)s/'%gppkg_config + gppkg_name
return (0, download_url, gppkg_name)
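    # Editor's note: depending on which branch above fires, the constructed
    # file name looks roughly like the following (illustrative values only,
    # not real artifact names):
    #   plperl-ossv5.12.4_pv1.1_gpdb4.3orca-rhel5-x86_64.gppkg   (oss version known)
    #   plperl-pv1.1_gpdb4.3orca-rhel5-x86_64.gppkg              (GPDB 4.3)
    #   plperl-1.1-rhel5-x86_64.gppkg                            (GPDB 4.2)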
def getconfig(self, product_version='4.2', gppkg=None):
config_file = local_path('gppkg.'+product_version+'.config')
fd = open(config_file, 'r')
        config = {}
for line in fd:
if gppkg in line:
properties = line.strip().split(":")[1]
config = dict(item.split("=") for item in properties.split(";") if item)
return config
    def get_os_compatible_pkg_list(self, os_='rhel5'):
        config_file = local_path('gppkg_platform.config')
        if os.path.exists(config_file):
            fd = open(config_file, 'r')
            compatible_pkg_list = []
            for line in fd:
                if os_ in line:
                    properties = line.strip().split(":")[1]
                    compatible_pkg_list.extend(item for item in properties.split(",") if item)
            return compatible_pkg_list
        else:
            raise Exception("gppkg_platform.config not found under: %s" % local_path(''))
def get_os_platform(self):
from sys import platform as _platform
machine = ''
if _platform == 'linux' or _platform == 'linux2': # Both SuSE and RHEL returns linux
if os.path.exists("/etc/SuSE-release"):
machine = 'suse'
else:
machine = 'redhat'
elif _platform == 'sunos5':
machine = 'solaris'
if not machine:
raise Exception('unable to determine the platform')
cmd = 'cat '
res = {'rc':0, 'stderr':'', 'stdout':''}
if machine.lower() == 'suse':
cmd = cmd + '/etc/SuSE-release'
run_shell_command (cmd, 'check os kernel version', res)
if 'SUSE Linux Enterprise Server 11' in res['stdout']:
os_ = 'suse11'
elif 'SUSE Linux Enterprise Server 10' in res['stdout']:
os_ = 'suse10'
elif machine.lower() == 'redhat':
cmd = cmd + '/etc/redhat-release'
run_shell_command (cmd, 'check os kernel version', res)
if 'Linux Server release 5.' in res['stdout']:
os_ = 'rhel5'
elif 'Linux Server release 6.' in res['stdout']:
os_ = 'rhel6'
elif machine.lower() == 'solaris':
cmd = cmd + '/etc/release'
run_shell_command (cmd, 'check os kernel version', res)
if 'Solaris 10' in res['stdout']:
os_ = 'sol10'
elif 'Solaris 11' in res['stdout']:
os_ = 'sol11'
logger.debug(res['stdout'])
return os_, platform.machine()
def check_os_compatibility(self, os_='rhel5', pkg_name=None):
        compatible_pkg_list = self.get_os_compatible_pkg_list(os_)
        if pkg_name not in compatible_pkg_list:
return False
else:
return True
def failover_gppkg_to_os_version(self, os_=None, pkg_name=None):
""" this function basically return a gppkg version which works on current platform
except the plperl needs rhel6, rhel5, and suse10, suse11 for different platform
others can use the suse10, rhel5 version for both platforms
"""
if pkg_name == 'plperl':
return os_
else:
if os_ == 'suse11':
return 'suse10'
elif os_ == 'rhel6':
return 'rhel5'
elif os_ == 'sol11':
return 'sol10'
else:
return os_
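# --- Editor's usage sketch (not part of the original module) ---
# Typical flow, assuming a GPDB 4.3.x cluster and that the artifacts server
# hosts the requested package (the version and package names below are
# illustrative):
#
#   pkg_util = Gppkg()
#   pkg_util.gppkg_install(product_version='4.3.5.0', gppkg='plperl')
#
# gppkg_install() is effectively idempotent: it returns early when
# `gppkg -q --all` already lists the package; otherwise it downloads the
# platform-matched .gppkg, installs it, runs any bundled SQL, and restarts
# the cluster with `gpstop -air`.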
|
|
# engine/default.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import re
import random
from . import reflection, interfaces, result
from ..sql import compiler, expression
from .. import types as sqltypes
from .. import exc, util, pool, processors
import codecs
import weakref
from .. import event
AUTOCOMMIT_REGEXP = re.compile(
r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)',
re.I | re.UNICODE)
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
supports_right_nested_joins = True
supports_native_enum = False
supports_native_boolean = False
supports_simple_order_by_label = True
engine_config_types = util.immutabledict([
('convert_unicode', util.bool_or_str('force')),
('pool_timeout', int),
('echo', util.bool_or_str('debug')),
('echo_pool', util.bool_or_str('debug')),
('pool_recycle', int),
('pool_size', int),
('max_overflow', int),
('pool_threadlocal', bool),
('use_native_unicode', bool),
])
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = 'use_encoding'
name = 'default'
# length at which to truncate
# any identifier.
max_identifier_length = 9999
# length at which to truncate
# the name of an index.
# Usually None to indicate
# 'use max_identifier_length'.
# thanks to MySQL, sigh
max_index_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
dbapi_type_map = {}
colspecs = {}
default_paramstyle = 'named'
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
server_version_info = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the Postgresql dialect,
the :class:`.Index` construct will now accept the keyword arguments
    ``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
def __init__(self, convert_unicode=False,
encoding='utf-8', paramstyle=None, dbapi=None,
implicit_returning=None,
supports_right_nested_joins=None,
case_sensitive=True,
supports_native_boolean=None,
label_length=None, **kwargs):
if not getattr(self, 'ported_sqla_06', True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format" %
self.name)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_right_nested_joins is not None:
self.supports_right_nested_joins = supports_right_nested_joins
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
if label_length and label_length > self.max_identifier_length:
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d" %
(label_length, self.max_identifier_length))
self.label_length = label_length
if self.description_encoding == 'use_encoding':
self._description_decoder = \
processors.to_unicode_processor_factory(
encoding
)
elif self.description_encoding is not None:
self._description_decoder = \
processors.to_unicode_processor_factory(
self.description_encoding
)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@classmethod
def get_pool_class(cls, url):
return getattr(cls, 'poolclass', pool.QueuePool)
def initialize(self, connection):
try:
self.server_version_info = \
self._get_server_version_info(connection)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = \
self._get_default_schema_name(connection)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = \
self.get_isolation_level(connection.connection)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if self.description_encoding is not None and \
self._check_unicode_description(connection):
self._description_decoder = self.description_encoding = None
self.do_rollback(connection.connection)
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
This is used to set dialect-wide per-connection options such as
isolation modes, unicode modes, etc.
If a callable is returned, it will be assembled into a pool listener
that receives the direct DBAPI connection, with all wrappers removed.
If None is returned, no listener will be generated.
"""
return None
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(expression.select([test]).compile(dialect=self))
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn("Exception attempting to "
"detect unicode returns: %r" % de)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60)
),
# detect if there's an NVARCHAR type with different behavior available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60)
),
]
if additional_tests:
tests += additional_tests
results = set([check_unicode(test) for test in tests])
if results.issuperset([True, False]):
return "conditional"
else:
return results == set([True])
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded,
# until pypy2.1beta2 with sqlite, so let's just check it -
# it's likely others will start doing this too in Py2k.
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select([
expression.literal_column("'x'").label("some_label")
]).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`.types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def reflecttable(self, connection, table, include_columns, exclude_columns):
insp = reflection.Inspector.from_engine(connection)
return insp.reflecttable(table, include_columns, exclude_columns)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
"""Compatibility method, adapts the result of get_primary_keys()
for those dialects which don't implement get_pk_constraint().
"""
return {
'constrained_columns':
self.get_primary_keys(conn, table_name,
schema=schema, **kw)
}
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters" %
(ident, self.max_identifier_length)
)
def connect(self, *cargs, **cparams):
return self.dbapi.connect(*cargs, **cparams)
def create_connect_args(self, url):
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
def set_engine_execution_options(self, engine, opts):
if 'isolation_level' in opts:
isolation_level = opts['isolation_level']
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
def set_connection_execution_options(self, connection, opts):
if 'isolation_level' in opts:
self._set_connection_isolation(connection, opts['isolation_level'])
def _set_connection_isolation(self, connection, level):
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.\
finalize_callback.append(self.reset_isolation_level)
def do_begin(self, dbapi_connection):
pass
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def create_xid(self):
"""Create a random two-phase transaction ID.
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
connection.execute(expression.RollbackToSavepointClause(name))
def do_release_savepoint(self, connection, name):
connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters)
def do_execute_no_params(self, cursor, statement, context=None):
cursor.execute(statement)
def is_disconnect(self, e, connection, cursor):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
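# --- Editor's illustrative sketch (not part of this module) ---
# Dialect authors normally subclass DefaultDialect rather than implementing
# interfaces.Dialect from scratch, overriding only what their DBAPI needs.
# A minimal, hypothetical skeleton (names are illustrative, not a real DBAPI):
#
#   class MyDialect(DefaultDialect):
#       name = 'mydb'
#       driver = 'mydbapi'
#       paramstyle = 'qmark'
#
#       @classmethod
#       def dbapi(cls):
#           import mydbapi          # hypothetical DBAPI module
#           return mydbapi
#
#       def _get_server_version_info(self, connection):
#           return (1, 0)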
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
isddl = False
executemany = False
result_map = None
compiled = None
statement = None
postfetch_cols = None
prefetch_cols = None
returning_cols = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.dialect = dialect
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.engine = connection.engine
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.statement._execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
if not dialect.supports_unicode_statements:
self.unicode_statement = util.text_type(compiled)
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(cls, dialect, connection, dbapi_connection,
compiled, parameters):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.dialect = dialect
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.engine = connection.engine
self.compiled = compiled
if not compiled.can_execute:
raise exc.ArgumentError("Not an executable clause")
self.execution_options = compiled.statement._execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
# compiled clauseelement. process bind params, process table defaults,
# track collections used by ResultProxy to target and process results
self.result_map = compiled.result_map
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
if self.isinsert or self.isupdate or self.isdelete:
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(compiled.returning and \
not compiled.statement._returning)
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = \
[compiled.construct_params(m, _group_number=grp) for
grp, m in enumerate(parameters)]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate:
self.postfetch_cols = self.compiled.postfetch
self.prefetch_cols = self.compiled.prefetch
self.returning_cols = self.compiled.returning
self.__process_defaults()
processors = compiled._bind_processors
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if dialect.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in self.compiled.positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
param = {}
if encode:
for key in compiled_params:
if key in processors:
param[dialect._encoder(key)[0]] = \
processors[key](compiled_params[key])
else:
param[dialect._encoder(key)[0]] = \
compiled_params[key]
else:
for key in compiled_params:
if key in processors:
param[key] = processors[key](compiled_params[key])
else:
param[key] = compiled_params[key]
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
@classmethod
def _init_statement(cls, dialect, connection, dbapi_connection,
statement, parameters):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.dialect = dialect
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.engine = connection.engine
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
dict((dialect._encoder(k)[0], d[k]) for k in d)
for d in parameters
] or [{}]
else:
self.parameters = [dialect.execute_sequence_format(p)
for p in parameters]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and \
isinstance(statement, util.text_type):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.dialect = dialect
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.engine = connection.engine
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def is_crud(self):
return self.isinsert or self.isupdate or self.isdelete
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get('autocommit',
not self.compiled and
self.statement and
expression.PARSE_AUTOCOMMIT
or False)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if isinstance(stmt, util.text_type) and \
not self.dialect.supports_unicode_statements:
stmt = self.dialect._encoder(stmt)[0]
if self.dialect.positional:
default_params = self.dialect.execute_sequence_format()
else:
default_params = {}
conn._cursor_execute(self.cursor, stmt, default_params, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect,
self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def create_cursor(self):
return self._dbapi_connection.cursor()
def pre_exec(self):
pass
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions,
issuing a new SELECT on the cursor (or a new one),
or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects
which support "implicit" primary key generation,
keep preexecute_autoincrement_sequences set to False,
and when no explicit id value was bound to the
statement.
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
value is used in place of calling get_lastrowid().
Note that this method is *not* equivalent to the
``lastrowid`` method on ``ResultProxy``, which is a
direct proxy to the DBAPI ``lastrowid`` accessor
in all cases.
"""
return self.cursor.lastrowid
def handle_dbapi_exception(self, e):
pass
def get_result_proxy(self):
return result.ResultProxy(self)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def post_insert(self):
if not self._is_implicit_returning and \
not self._is_explicit_returning and \
not self.compiled.inline and \
self.dialect.postfetch_lastrowid and \
(not self.inserted_primary_key or \
None in self.inserted_primary_key):
table = self.compiled.statement.table
lastrowid = self.get_lastrowid()
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid if c is autoinc_col else v
for c, v in zip(
table.primary_key,
self.inserted_primary_key)
]
def _fetch_implicit_returning(self, resultproxy):
table = self.compiled.statement.table
row = resultproxy.fetchone()
ipk = []
for c, v in zip(table.primary_key, self.inserted_primary_key):
if v is not None:
ipk.append(v)
else:
ipk.append(row[c])
self.inserted_primary_key = ipk
self.returned_defaults = row
def _fetch_implicit_update_returning(self, resultproxy):
row = resultproxy.fetchone()
self.returned_defaults = row
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and \
bool(self.postfetch_cols)
def set_input_sizes(self, translate=None, exclude_types=None):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
        This method is only called by those dialects which require it,
currently cx_oracle.
"""
if not hasattr(self.compiled, 'bind_names'):
return
types = dict(
(self.compiled.bind_names[bindparam], bindparam.type)
for bindparam in self.compiled.bind_names)
if self.dialect.positional:
inputsizes = []
for key in self.compiled.positiontup:
typeengine = types[key]
dbtype = typeengine.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if dbtype is not None and \
(not exclude_types or dbtype not in exclude_types):
inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*inputsizes)
except Exception as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self)
else:
inputsizes = {}
for key in self.compiled.bind_names.values():
typeengine = types[key]
dbtype = typeengine.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if dbtype is not None and \
(not exclude_types or dbtype not in exclude_types):
if translate:
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**inputsizes)
except Exception as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self)
def _exec_default(self, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# pulled into _exec_scalar()
conn = self.connection
c = expression.select([default.arg]).compile(bind=conn)
return conn._execute_compiled(c, (), {}).scalar()
else:
return default.arg
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column.onupdate, column.type)
def __process_defaults(self):
"""Generate default values for compiled insert/update statements,
and generate inserted_primary_key collection.
"""
key_getter = self.compiled._key_getters_for_crud_column[2]
if self.executemany:
if len(self.compiled.prefetch):
scalar_defaults = {}
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in self.prefetch_cols:
if self.isinsert and c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
elif self.isupdate and c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in self.prefetch_cols:
if c in scalar_defaults:
val = scalar_defaults[c]
elif self.isinsert:
val = self.get_insert_default(c)
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
else:
self.current_parameters = compiled_parameters = \
self.compiled_parameters[0]
for c in self.compiled.prefetch:
if self.isinsert:
val = self.get_insert_default(c)
else:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
if self.isinsert:
self.inserted_primary_key = [
self.compiled_parameters[0].get(key_getter(c), None)
for c in self.compiled.\
statement.table.primary_key
]
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
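# Editor's note (illustrative, not part of this module): dialects that need
# custom execution behaviour pair their Dialect subclass with an
# ExecutionContext subclass through the same attribute assigned above, e.g.
#
#   class MyExecutionContext(DefaultExecutionContext):
#       def get_lastrowid(self):
#           return self.cursor.lastrowid
#
#   class MyDialect(DefaultDialect):
#       execution_ctx_cls = MyExecutionContext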
|
|
# Copyright (C) 2012-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Serialization routines
You probably don't need to use these directly.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import struct
# Py3 compatibility
import sys
if sys.version > '3':
_bchr = lambda x: bytes([x])
_bord = lambda x: x[0]
from io import BytesIO as _BytesIO
else:
_bchr = chr
_bord = ord
from cStringIO import StringIO as _BytesIO
MAX_SIZE = 0x02000000
def Hash(msg):
"""SHA256^2)(msg) -> bytes"""
return hashlib.sha256(hashlib.sha256(msg).digest()).digest()
def Hash160(msg):
"""RIPEME160(SHA256(msg)) -> bytes"""
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(msg).digest())
return h.digest()
class SerializationError(Exception):
"""Base class for serialization errors"""
class SerializationTruncationError(SerializationError):
"""Serialized data was truncated
Thrown by deserialize() and stream_deserialize()
"""
class DeserializationExtraDataError(SerializationError):
"""Deserialized data had extra data at the end
Thrown by deserialize() when not all data is consumed during
deserialization. The deserialized object and extra padding not consumed are
saved.
"""
def __init__(self, msg, obj, padding):
super(DeserializationExtraDataError, self).__init__(msg)
self.obj = obj
self.padding = padding
def ser_read(f, n):
"""Read from a stream safely
Raises SerializationError and SerializationTruncationError appropriately.
Use this instead of f.read() in your classes stream_(de)serialization()
functions.
"""
if n > MAX_SIZE:
        raise SerializationError('Asked to read 0x%x bytes; MAX_SIZE exceeded' % n)
r = f.read(n)
if len(r) < n:
raise SerializationTruncationError('Asked to read %i bytes, but only got %i' % (n, len(r)))
return r
class Serializable(object):
"""Base class for serializable objects"""
__slots__ = []
def stream_serialize(self, f):
"""Serialize to a stream"""
raise NotImplementedError
@classmethod
def stream_deserialize(cls, f):
"""Deserialize from a stream"""
raise NotImplementedError
def serialize(self):
"""Serialize, returning bytes"""
f = _BytesIO()
self.stream_serialize(f)
return f.getvalue()
@classmethod
def deserialize(cls, buf, allow_padding=False):
"""Deserialize bytes, returning an instance
allow_padding - Allow buf to include extra padding. (default False)
If allow_padding is False and not all bytes are consumed during
deserialization DeserializationExtraDataError will be raised.
"""
fd = _BytesIO(buf)
r = cls.stream_deserialize(fd)
if not allow_padding:
padding = fd.read()
if len(padding) != 0:
raise DeserializationExtraDataError('Not all bytes consumed during deserialization',
r, padding)
return r
def GetHash(self):
"""Return the hash of the serialized object"""
return Hash(self.serialize())
def __eq__(self, other):
if (not isinstance(other, self.__class__) and
not isinstance(self, other.__class__)):
return NotImplemented
return self.serialize() == other.serialize()
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.serialize())
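# Illustrative sketch only (not part of this module): a minimal Serializable
# subclass holding a single little-endian uint32 field, showing the
# stream_serialize()/stream_deserialize() contract defined above.
class _ExampleBlob(Serializable):
    """Hypothetical example type; the field layout here is made up."""
    __slots__ = ['n']
    def __init__(self, n=0):
        self.n = n
    def stream_serialize(self, f):
        f.write(struct.pack(b'<I', self.n))
    @classmethod
    def stream_deserialize(cls, f):
        return cls(struct.unpack(b'<I', ser_read(f, 4))[0])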
class ImmutableSerializable(Serializable):
"""Immutable serializable object"""
__slots__ = ['_cached_GetHash', '_cached__hash__']
def __setattr__(self, name, value):
raise AttributeError('Object is immutable')
def __delattr__(self, name):
raise AttributeError('Object is immutable')
def GetHash(self):
"""Return the hash of the serialized object"""
try:
return self._cached_GetHash
except AttributeError:
_cached_GetHash = super(ImmutableSerializable, self).GetHash()
object.__setattr__(self, '_cached_GetHash', _cached_GetHash)
return _cached_GetHash
def __hash__(self):
try:
return self._cached__hash__
except AttributeError:
_cached__hash__ = hash(self.serialize())
object.__setattr__(self, '_cached__hash__', _cached__hash__)
return _cached__hash__
class Serializer(object):
"""Base class for object serializers"""
def __new__(cls):
raise NotImplementedError
@classmethod
def stream_serialize(cls, obj, f):
raise NotImplementedError
@classmethod
def stream_deserialize(cls, f):
raise NotImplementedError
@classmethod
def serialize(cls, obj):
f = _BytesIO()
cls.stream_serialize(obj, f)
return f.getvalue()
@classmethod
def deserialize(cls, buf):
return cls.stream_deserialize(_BytesIO(buf))
class VarIntSerializer(Serializer):
"""Serialization of variable length ints"""
@classmethod
def stream_serialize(cls, i, f):
if i < 0:
raise ValueError('varint must be non-negative integer')
elif i < 0xfd:
f.write(_bchr(i))
elif i <= 0xffff:
f.write(_bchr(0xfd))
f.write(struct.pack(b'<H', i))
elif i <= 0xffffffff:
f.write(_bchr(0xfe))
f.write(struct.pack(b'<I', i))
else:
f.write(_bchr(0xff))
f.write(struct.pack(b'<Q', i))
@classmethod
def stream_deserialize(cls, f):
r = _bord(ser_read(f, 1))
if r < 0xfd:
return r
elif r == 0xfd:
return struct.unpack(b'<H', ser_read(f, 2))[0]
elif r == 0xfe:
return struct.unpack(b'<I', ser_read(f, 4))[0]
else:
return struct.unpack(b'<Q', ser_read(f, 8))[0]
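# Illustrative sketch only: round-tripping a value through VarIntSerializer.
# Values below 0xfd occupy a single byte; larger values get a 0xfd/0xfe/0xff
# marker followed by 2, 4 or 8 little-endian bytes.
def _example_varint_roundtrip(i=300):
    buf = VarIntSerializer.serialize(i)      # b'\xfd\x2c\x01' for i == 300
    assert VarIntSerializer.deserialize(buf) == i
    return buf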
class BytesSerializer(Serializer):
"""Serialization of bytes instances"""
@classmethod
def stream_serialize(cls, b, f):
VarIntSerializer.stream_serialize(len(b), f)
f.write(b)
@classmethod
def stream_deserialize(cls, f):
l = VarIntSerializer.stream_deserialize(f)
return ser_read(f, l)
class VectorSerializer(Serializer):
"""Base class for serializers of object vectors"""
@classmethod
def stream_serialize(cls, inner_cls, objs, f):
VarIntSerializer.stream_serialize(len(objs), f)
for obj in objs:
inner_cls.stream_serialize(obj, f)
@classmethod
def stream_deserialize(cls, inner_cls, f):
n = VarIntSerializer.stream_deserialize(f)
r = []
for i in range(n):
r.append(inner_cls.stream_deserialize(f))
return r
class uint256VectorSerializer(Serializer):
"""Serialize vectors of uint256"""
@classmethod
def stream_serialize(cls, uints, f):
VarIntSerializer.stream_serialize(len(uints), f)
for uint in uints:
assert len(uint) == 32
f.write(uint)
@classmethod
def stream_deserialize(cls, f):
n = VarIntSerializer.stream_deserialize(f)
r = []
for i in range(n):
r.append(ser_read(f, 32))
return r
class intVectorSerialzer(Serializer):
    """Serialize vectors of ints"""
    @classmethod
    def stream_serialize(cls, ints, f):
        l = len(ints)
        VarIntSerializer.stream_serialize(l, f)
        for i in ints:
            f.write(struct.pack(b"<i", i))
    @classmethod
    def stream_deserialize(cls, f):
        l = VarIntSerializer.stream_deserialize(f)
        ints = []
        for i in range(l):
            ints.append(struct.unpack(b"<i", ser_read(f, 4))[0])
        return ints
class VarStringSerializer(Serializer):
"""Serialize variable length strings"""
@classmethod
def stream_serialize(cls, s, f):
l = len(s)
VarIntSerializer.stream_serialize(l, f)
f.write(s)
@classmethod
def stream_deserialize(cls, f):
l = VarIntSerializer.stream_deserialize(f)
return ser_read(f, l)
def uint256_from_str(s):
"""Convert bytes to uint256"""
r = 0
t = struct.unpack(b"<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
"""Convert compact encoding to uint256
Used for the nBits compact encoding of the target in the block header.
"""
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
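# Worked example (illustrative): the well-known genesis-block nBits value
# 0x1d00ffff expands to a target of 0xffff << 208.
def _example_compact():
    assert uint256_from_compact(0x1d00ffff) == 0xffff << (8 * (0x1d - 3))
    return uint256_from_compact(0x1d00ffff)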
def uint256_to_shortstr(u):
s = "%064x" % (u,)
return s[:16]
__all__ = (
'MAX_SIZE',
'Hash',
'Hash160',
'SerializationError',
'SerializationTruncationError',
'DeserializationExtraDataError',
'ser_read',
'Serializable',
'ImmutableSerializable',
'Serializer',
'VarIntSerializer',
'BytesSerializer',
'VectorSerializer',
'uint256VectorSerializer',
'intVectorSerialzer',
'VarStringSerializer',
'uint256_from_str',
'uint256_from_compact',
'uint256_to_shortstr',
)
|
|
#!/usr/bin/env python
#Parallel JTAG programmer for the MSP430 embedded processor.
#
#(C) 2002 Chris Liechti <[email protected]>
#this is distributed under a free software license, see license.txt
#
#Requires Python 2+ and the binary extension _parjtag.
import sys
import _parjtag
VERSION = "1.3"
DEBUG = 0 #disable debug messages by default
#frame specific consts
ERASE_MASS = 2
ERASE_MAIN = 1
ERASE_SGMT = 0
#states
FREERUNNING = 0
STOPPED = 1
#Configurations of the MSP430 driver
VERIFICATION_MODE = 0 #Verify data downloaded to FLASH memories.
RAMSIZE_OPTION = 1 #Change RAM used to download and program flash blocks
DEBUG_OPTION = 2 #Set debug level. Enables debug outputs.
#enumeration of output formats for uploads
HEX = 0
INTELHEX = 1
BINARY = 2
#exceptions
class JTAGException(Exception): pass
#for the use with memread
def hexdump( (adr, memstr) ):
"""Print a hex dump of data collected with memread
    arg1: tuple with address, memory
return None"""
count = 0
ascii = ''
for value in map(ord, memstr):
if not count: print "%04x: " % adr,
print "%02x" % value,
ascii += (32 <= value < 128) and chr(value) or '.'
count += 1
adr += 1
if count == 16:
count = 0
print " ", ascii
ascii = ''
if count < 16: print " "*(16-count), " ", ascii
def makeihex( (address, data) ):
"""work though the data and output lines in inzel hex format.
and end tag is appended"""
start = 0
while start<len(data):
end = start + 16
if end > len(data): end = len(data)
_ihexline(address, [ord(x) for x in data[start:end]])
start += 16
address += 16
_ihexline(address, [], type=1) #append no data but an end line
def _ihexline(address, buffer, type=0):
"""encode one line, output with checksum"""
sys.stdout.write( ':%02X%04X%02X' % (len(buffer), address & 0xffff, type) )
    sum = len(buffer) + ((address >> 8) & 255) + (address & 255) + type #record type counts toward the checksum
for b in buffer:
if b == None: b = 0 #substitute nonexistent values with zero
sys.stdout.write('%02X' % (b & 255))
sum += b&255
sys.stdout.write('%02X\n' %( (-sum) & 255))
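#Example (illustrative): _ihexline(0x0100, [0x21, 0x46]) writes the record
#":02010000214696" -- two data bytes 0x21 0x46 at address 0x0100, record
#type 00, and checksum 0x96 (two's complement of the byte sum).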
class Segment:
"""store a string with memory contents along with its startaddress"""
def __init__(self, startaddress = 0, data=None):
if data is None:
self.data = ''
else:
self.data = data
self.startaddress = startaddress
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def __repr__(self):
return "Segment(startaddress = 0x%04x, data=%r)" % (self.startaddress, self.data)
class Memory:
"""represent memory contents. with functions to load files"""
def __init__(self, filename=None):
self.segments = []
if filename:
self.filename = filename
self.loadFile(filename)
def append(self, seg):
self.segments.append(seg)
def __getitem__(self, index):
return self.segments[index]
def __len__(self):
return len(self.segments)
def loadIHex(self, file):
"""load data from a (opened) file in Intel-HEX format"""
segmentdata = []
currentAddr = 0
startAddr = 0
lines = file.readlines()
for l in lines:
if not l.strip(): continue #skip empty lines
if l[0] != ':': raise Exception("File Format Error\n")
l = l.strip() #fix CR-LF issues...
length = int(l[1:3],16)
address = int(l[3:7],16)
type = int(l[7:9],16)
check = int(l[-2:],16)
if type == 0x00:
if currentAddr != address:
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
startAddr = currentAddr = address
segmentdata = []
for i in range(length):
segmentdata.append( chr(int(l[9+2*i:11+2*i],16)) )
currentAddr = length + currentAddr
elif type == 0x01:
pass
else:
sys.stderr.write("Ignored unknown field (type 0x%02x) in ihex file.\n" % type)
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
def loadTIText(self, file):
"""load data from a (opened) file in TI-Text format"""
next = 1
currentAddr = 0
startAddr = 0
segmentdata = []
#Convert data for MSP430, TXT-File is parsed line by line
while next >= 1:
#Read one line
l = file.readline()
if not l: break #EOF
l = l.strip()
if l[0] == 'q': break
elif l[0] == '@': #if @ => new address => send frame and set new addr.
#create a new segment
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
startAddr = currentAddr = int(l[1:],16)
segmentdata = []
else:
for i in l.split():
segmentdata.append(chr(int(i,16)))
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
def loadELF(self, file):
"""load data from a (opened) file in ELF object format.
File must be seekable"""
import elf
obj = elf.ELFObject()
obj.fromFile(file)
if obj.e_type != elf.ELFObject.ET_EXEC:
raise Exception("No executable")
for section in obj.getSections():
if DEBUG:
sys.stderr.write("ELF section %s at 0x%04x %d bytes\n" % (section.name, section.lma, len(section.data)))
if len(section.data):
self.segments.append( Segment(section.lma, section.data) )
def loadFile(self, filename):
"""fill memory with the contents of a file. file type is determined from extension"""
#TODO: do a contents based detection
if filename[-4:].lower() == '.txt':
self.loadTIText(open(filename, "rb"))
elif filename[-4:].lower() in ('.a43', '.hex'):
self.loadIHex(open(filename, "rb"))
else:
self.loadELF(open(filename, "rb"))
def getMemrange(self, fromadr, toadr):
"""get a range of bytes from the memory. unavailable values are filled with 0xff."""
res = ''
        toadr = toadr + 1 #python indexes exclude the end, so include it
while fromadr < toadr:
for seg in self.segments:
segend = seg.startaddress + len(seg.data)
if seg.startaddress <= fromadr and fromadr < segend:
if toadr > segend: #not all data in segment
catchlength = segend-fromadr
else:
catchlength = toadr-fromadr
res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]
fromadr = fromadr + catchlength #adjust start
if len(res) >= toadr-fromadr:
break #return res
else: #undefined memory is filled with 0xff
res = res + chr(255)
fromadr = fromadr + 1 #adjust start
return res
class JTAG:
"""wrap the _parjtag extension"""
def __init__(self):
self.showprogess = 0
def connect(self, lpt=None):
"""connect to specified or default port"""
if lpt is None:
_parjtag.connect()
else:
_parjtag.connect(lpt)
def close(self):
"""release JTAG"""
_parjtag.release()
def uploadData(self, startaddress, size):
"""upload a datablock"""
if DEBUG > 1: sys.stderr.write("* uploadData()\n")
return _parjtag.memread(startaddress, size)
def actionMassErase(self):
"""Erase the flash memory completely (with mass erase command)"""
sys.stderr.write("Mass Erase...\n")
_parjtag.memerase(ERASE_MASS)
def actionMainErase(self):
"""Erase the MAIN flash memory, leave the INFO mem"""
sys.stderr.write("Erase Main Flash...\n")
_parjtag.memerase(ERASE_MAIN, 0xfffe)
def makeActionSegmentErase(self, address):
"""Selective segment erase"""
class SegmentEraser:
def __init__(self, segaddr):
self.address = segaddr
def __call__(self):
sys.stderr.write("Erase Segment @ 0x%04x...\n" % self.address)
_parjtag.memerase(ERASE_SGMT, self.address)
return SegmentEraser(address)
def actionEraseCheck(self):
"""check the erasure of required flash cells."""
sys.stderr.write("Erase Check by file ...\n")
if self.data is not None:
for seg in self.data:
data = _parjtag.memread(seg.startaddress, len(seg.data))
if data != '\xff'*len(seg.data): raise JTAGException("Erase check failed")
else:
raise JTAGException("cannot do erase check against data with not knowing the actual data")
def progess_update(self, count, total):
sys.stderr.write("\r%d%%" % (100*count/total))
def actionProgram(self):
"""program data into flash memory."""
if self.data is not None:
sys.stderr.write("Program ...\n")
if self.showprogess:
_parjtag.set_flash_callback(self.progess_update)
bytes = 0
for seg in self.data:
_parjtag.memwrite(seg.startaddress, seg.data)
bytes += len(seg.data)
if self.showprogess:
sys.stderr.write("\r")
sys.stderr.write("%i bytes programmed.\n" % bytes)
else:
raise JTAGException("programming without data not possible")
def actionVerify(self):
"""Verify programmed data"""
if self.data is not None:
sys.stderr.write("Verify ...\n")
for seg in self.data:
data = _parjtag.memread(seg.startaddress, len(seg.data))
if data != seg.data: raise JTAGException("Verify failed")
else:
raise JTAGException("verify without data not possible")
def actionReset(self):
"""perform a reset"""
sys.stderr.write("Reset device ...\n")
_parjtag.reset(0, 0)
def actionRun(self, address):
"""start program at specified address"""
raise NotImplementedError
#sys.stderr.write("Load PC with 0x%04x ...\n" % address)
def funclet(self):
"""download and start funclet"""
sys.stderr.write("Download and execute of funclet...\n")
if len(self.data) > 1:
raise JTAGException("don't know how to handle multiple segments in funclets")
_parjtag.funclet(self.data[0].data)
sys.stderr.write("Funclet OK.\n")
def usage():
"""print some help message"""
sys.stderr.write("""
USAGE: %s [options] [file]
Version: %s
If "-" is specified as file the data is read from the stdinput.
A file ending with ".txt" is considered to be in TIText format; all
other filenames are considered IntelHex.
General options:
-h, --help Show this help screen.
  -l, --lpt=name Specify another parallel port.
  (defaults to LPT1, /dev/parport0 on unix)
-D, --debug Increase level of debug messages. This won't be
very useful for the average user...
-I, --intelhex Force fileformat to IntelHex
-T, --titext Force fileformat to be TIText
-f, --funclet The given file is a funclet (a small program to
be run in RAM)
  -R, --ramsize Specify the amount of RAM to be used to program
flash (default 256).
Program Flow Specifiers:
-e, --masserase Mass Erase (clear all flash memory)
-m, --mainerase Erase main flash memory only
--eraseinfo Erase info flash memory only (0x1000-0x10ff)
--erase=address Selectively erase segment at the specified address
-E, --erasecheck Erase Check by file
-p, --program Program file
-v, --verify Verify by file
The order of the above options matters! The table is ordered by normal
execution order. For the options "Epv" a file must be specified.
Program flow specifiers default to "p" if a file is given.
Don't forget to specify "e" or "eE" when programming flash!
"p" already verifies the programmed data, "v" adds an additional
verification through uploading the written data for a 1:1 compare.
No default action is taken if "p" and/or "v" is given, e.g. specifying
only "v" does a check by file of a programmed device.
Data retrieving:
-u, --upload=addr Upload a datablock (see also: -s).
  -s, --size=num Size of the data block to upload. (Default is 2)
-x, --hex Show a hexadecimal display of the uploaded data.
(Default)
-b, --bin Get binary uploaded data. This can be used
to redirect the output into a file.
-i, --ihex Uploaded data is output in Intel HEX format.
This can be used to clone a device.
Do before exit:
  -g, --go=address Start program execution at specified address.
This implies option "w" (wait)
-r, --reset Reset connected MSP430. Starts application.
This is a normal device reset and will start
  the program that is specified in the reset
interrupt vector. (see also -g)
-w, --wait Wait for <ENTER> before closing parallel port.
""" % (sys.argv[0], VERSION))
def main():
global DEBUG
import getopt
filetype = None
filename = None
reset = 0
wait = 0
goaddr = None
jtag = JTAG()
toinit = []
todo = []
startaddr = None
size = 2
outputformat= HEX
lpt = None
funclet = None
ramsize = None
sys.stderr.write("MSP430 parallel JTAG programmer Version: %s\n" % VERSION)
try:
opts, args = getopt.getopt(sys.argv[1:],
"hl:weEmpvrg:Du:d:s:xbiITfR:S",
["help", "lpt=", "wait"
"masserase", "erasecheck", "mainerase", "program",
"erase=", "eraseinfo",
"verify", "reset", "go=", "debug",
"upload=", "download=", "size=", "hex", "bin", "ihex",
"intelhex", "titext", "funclet", "ramsize=", "progress"]
)
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-l", "--lpt"):
lpt = a
elif o in ("-w", "--wait"):
wait = 1
elif o in ("-e", "--masserase"):
toinit.append(jtag.actionMassErase) #Erase Flash
elif o in ("-E", "--erasecheck"):
toinit.append(jtag.actionEraseCheck) #Erase Check (by file)
elif o in ("-m", "--mainerase"):
toinit.append(jtag.actionMainErase) #Erase main Flash
elif o == "--erase":
try:
seg = int(a, 0)
toinit.append(jtag.makeActionSegmentErase(seg))
except ValueError:
sys.stderr.write("segment address must be a valid number in dec, hex or octal\n")
sys.exit(2)
elif o == "--eraseinfo":
toinit.append(jtag.makeActionSegmentErase(0x1000))
toinit.append(jtag.makeActionSegmentErase(0x1080))
elif o in ("-p", "--program"):
todo.append(jtag.actionProgram) #Program file
elif o in ("-v", "--verify"):
todo.append(jtag.actionVerify) #Verify file
elif o in ("-r", "--reset"):
reset = 1
elif o in ("-g", "--go"):
try:
goaddr = int(a, 0) #try to convert decimal
except ValueError:
sys.stderr.write("upload address must be a valid number in dec, hex or octal\n")
sys.exit(2)
elif o in ("-D", "--debug"):
DEBUG = DEBUG + 1
elif o in ("-u", "--upload"):
try:
startaddr = int(a, 0) #try to convert number of any base
except ValueError:
sys.stderr.write("upload address must be a valid number in dec, hex or octal\n")
sys.exit(2)
elif o in ("-s", "--size"):
try:
size = int(a, 0)
except ValueError:
sys.stderr.write("upload address must be a valid number in dec, hex or octal\n")
sys.exit(2)
        #output formats
elif o in ("-x", "--hex"):
outputformat = HEX
elif o in ("-b", "--bin"):
outputformat = BINARY
elif o in ("-i", "--ihex"):
outputformat = INTELHEX
#input formats
elif o in ("-I", "--intelhex"):
filetype = 0
elif o in ("-T", "--titext"):
filetype = 1
#others
elif o in ("-f", "--funclet"):
funclet = 1
elif o in ("-R", "--ramsize"):
try:
ramsize = int(a, 0)
except ValueError:
sys.stderr.write("ramsize must be a valid number in dec, hex or octal\n")
sys.exit(2)
elif o in ("-S", "--progress"):
jtag.showprogess = 1
if len(args) == 0:
sys.stderr.write("Use -h for help\n")
elif len(args) == 1: #a filename is given
if not funclet:
if not todo: #if there are no actions yet
todo.extend([ #add some useful actions...
jtag.actionProgram,
])
filename = args[0]
else: #number of args is wrong
usage()
sys.exit(2)
if DEBUG: #debug infos
sys.stderr.write("debug level set to %d\n" % DEBUG)
_parjtag.configure(DEBUG_OPTION, DEBUG)
sys.stderr.write("python version: %s\n" % sys.version)
#sanity check of options
if goaddr and reset:
sys.stderr.write("Warning: option --reset ignored as --go is specified!\n")
reset = 0
if startaddr and reset:
sys.stderr.write("Warning: option --reset ignored as --upload is specified!\n")
reset = 0
#prepare data to download
jtag.data = Memory() #prepare downloaded data
if filetype is not None: #if the filetype is given...
if filename is None:
raise ValueError("no filename but filetype specified")
if filename == '-': #get data from stdin
file = sys.stdin
else:
file = open(filename,"rb") #or from a file
if filetype == 0: #select load function
jtag.data.loadIHex(file) #intel hex
elif filetype == 1:
jtag.data.loadTIText(file) #TI's format
else:
raise ValueError("illegal filetype specified")
else: #no filetype given...
if filename == '-': #for stdin:
jtag.data.loadIHex(sys.stdin) #assume intel hex
elif filename:
jtag.data.loadFile(filename) #autodetect otherwise
if DEBUG > 5: sys.stderr.write("File: %r\n" % filename)
try:
jtag.connect(lpt) #try to open port
except IOError:
raise #do not handle here
else: #continue if open was successful
if ramsize is not None:
_parjtag.configure(RAMSIZE_OPTION, ramsize)
#initialization list
if toinit: #erase and erase check
if DEBUG: sys.stderr.write("Preparing device ...\n")
for f in toinit: f()
#work list
if todo:
if DEBUG > 0: #debug
                #show a nice list of scheduled actions
sys.stderr.write("TODO list:\n")
for f in todo:
try:
sys.stderr.write(" %s\n" % f.func_name)
except AttributeError:
sys.stderr.write(" %r\n" % f)
for f in todo: f() #work through todo list
if reset: #reset device first if desired
jtag.actionReset()
if funclet is not None: #download and start funclet
jtag.funclet()
if goaddr is not None: #start user programm at specified address
jtag.actionRun(goaddr) #load PC and execute
#upload datablock and output
if startaddr is not None:
if goaddr: #if a program was started...
raise NotImplementedError
#TODO:
#sys.stderr.write("Waiting to device for reconnect for upload: ")
data = jtag.uploadData(startaddr, size) #upload data
if outputformat == HEX: #depending on output format
hexdump( (startaddr, data) ) #print a hex display
elif outputformat == INTELHEX:
                makeihex( (startaddr, data) ) #output an intel-hex file
else:
sys.stdout.write(data) #binary output w/o newline!
wait = 0 #wait makes no sense as after the upload the device is still stopped
if wait: #wait at the end if desired
sys.stderr.write("Press <ENTER> ...\n") #display a prompt
raw_input() #wait for newline
_parjtag.reset(1, 1) #reset and release target
#~ jtag.actionReset()
jtag.close() #Release communication port
if __name__ == '__main__':
try:
main()
except SystemExit:
raise #let pass exit() calls
except KeyboardInterrupt:
if DEBUG: raise #show full trace in debug mode
sys.stderr.write("user abort.\n") #short messy in user mode
sys.exit(1) #set errorlevel for script usage
except Exception, msg: #every Exception is caught and displayed
if DEBUG: raise #show full trace in debug mode
sys.stderr.write("\nAn error occoured:\n%s\n" % msg) #short messy in user mode
sys.exit(1) #set errorlevel for script usage
|
|
#!/usr/bin/env python
#
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import logging.handlers
import sys
import boto3
import requests
from botocore.exceptions import ClientError
from botocore.exceptions import BotoCoreError
import client
import validator
import utils
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
CONSOLE_LOG_FORMAT = '%(levelname)s: %(message)s'
FILE_LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(levelname)s '
'(%(module)s) %(message)s')
LOG_FILE_SIZE = 10485760 # 10MB
ROTATE_LOG_FILE_COUNT = 5
def get_settings(file_path):
"""Gets settings data from configuration file.
:param file_path: path to configuration file
:type file_path: str
:return: data settings from configuration file
:rtype: dict
"""
try:
data = validator.validate_file_by_schema(validator.CONFIG_SCHEMA,
file_path)
except (ValueError, OSError, IOError) as e:
logging.error("Received error: {}".format(e), exc_info=True)
raise
return data
def get_metrics_data(client_api, metrics):
"""Gets all data for specific metrics.
:param client_api: API client, that handles API requests
:type client_api: client.APIClient
:param metrics: list of metrics to retrieve data
:type metrics: list
:return: list of metrics data
:rtype: list[dict]
"""
logging.info("Start fetching metrics from Prometheus.")
metrics_data_list = []
for metric in metrics:
params = {'query': metric}
try:
data = client_api.get_request('query', params)
except requests.exceptions.RequestException as e:
logging.error("Received error: {}".format(e), exc_info=True)
raise
# Prometheus returns false-positive result for non-existent metrics.
# We have to skip non-existent metrics, i.e. those with empty data
if not data['data']['result']:
logging.warning("Metric '{0}' not found.".format(metric))
continue
metrics_data_list.append(data)
logging.info("{0} out of {1} metrics were successfully fetched from "
"Prometheus.".format(len(metrics_data_list), len(metrics)))
return metrics_data_list
def create_metric_dimensions(data):
"""Creates metric dimensions based on retrieved unique data.
:param data: extra information about metric
:type data: dict
:return: metric dimensions as a list of dimension dict
[{'Name': 'string', 'Value': 'string'}, {...}, {...}]
:rtype: list[dict]
"""
ignored = ('__name__',)
return [{'Name': k, 'Value': v} for k, v in data.items()
if k not in ignored]
def convert_value(value):
"""Converts metric value to float."""
try:
return float(value)
except ValueError:
return value
def prepare_single_metric(name, value, dimensions, timestamp, unit='None'):
"""Creates CloudWatch valid metric data format."""
return {
'MetricName': name,
'Dimensions': dimensions,
'Timestamp': timestamp,
'Value': convert_value(value),
'Unit': unit
}
def prepare_metrics(data):
"""Converts Prometheus metric data format to CloudWatch one.
:param data: list of metrics data in Prometheus-like format
:type data: list[dict]
:return: list of metrics data in CloudWatch-like format
:rtype: list[dict]
"""
logging.info("Start converting metrics to CloudWatch format.")
metrics = []
for item in data:
for i in item['data']['result']:
single_metric_data = prepare_single_metric(
name=i['metric']['__name__'],
value=i['value'][1],
dimensions=create_metric_dimensions(i['metric']),
timestamp=i['value'][0],
unit='Count'
)
metrics.append(single_metric_data)
logging.info("{0} metrics are ready to be pushed to "
"CloudWatch.".format(len(metrics)))
return metrics
def chunks(data, n):
"""Yield successive n-sized chunks from metrics data list."""
for i in range(0, len(data), n):
yield data[i:i + n]
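def _example_conversion():
    """Illustrative sketch only: the sample payload below mimics the shape of
    a Prometheus instant-query response; the values are made up. The resulting
    CloudWatch datum is
    {'MetricName': 'up',
     'Dimensions': [{'Name': 'instance', 'Value': 'localhost:9090'}],
     'Timestamp': 1500000000.0, 'Value': 1.0, 'Unit': 'Count'},
    and chunks() then splits such a list into batches of 20 for
    put_metric_data.
    """
    sample = [{
        'data': {
            'result': [{
                'metric': {'__name__': 'up', 'instance': 'localhost:9090'},
                'value': [1500000000.0, '1'],
            }],
        },
    }]
    return list(chunks(prepare_metrics(sample), 20))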
def configure_logging(level=logging.INFO, file_path=None):
logging.basicConfig(level=level, format=CONSOLE_LOG_FORMAT)
if file_path:
fh = logging.handlers.RotatingFileHandler(
filename=file_path,
maxBytes=LOG_FILE_SIZE,
backupCount=ROTATE_LOG_FILE_COUNT
)
fh.setLevel(level=level)
formatter = logging.Formatter(fmt=FILE_LOG_FORMAT, datefmt=DATE_FORMAT)
fh.setFormatter(formatter)
logging.getLogger('').addHandler(fh)
def main():
parser = argparse.ArgumentParser(description='CloudWatch metrics importer')
parser.add_argument('-c',
'--config',
metavar='CONFIG_FILE',
required=True,
help='Configuration file.')
parser.add_argument('-d',
'--dump',
choices=['prometheus', 'cloudwatch'],
help='Dump metrics to file and exit.')
parser.add_argument('-f',
'--format',
choices=utils.SUPPORTED_FILE_FORMATS,
default='json',
help='Format of metrics file dump. Defaults to json.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Increase output verbosity.')
parser.add_argument('--log-file',
help='Log file to store logs.')
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
log_file = args.log_file if args.log_file else None
configure_logging(level=level, file_path=log_file)
logging.info("Start reading configuration from "
"file '{0}'.".format(args.config))
settings = get_settings(args.config)
url = settings.get('url')
metrics = settings.get('metrics')
namespace = settings.get('namespace')
aws_region = settings.get('aws-region')
# APIClient to fetch data from Prometheus
client_api = client.APIClient(url=url)
metrics_data = get_metrics_data(client_api, metrics)
cw_metrics_data = prepare_metrics(metrics_data)
dump_type = {'prometheus': metrics_data, 'cloudwatch': cw_metrics_data}
if args.dump:
file_name = "{0}.{1}".format(args.dump, args.format)
utils.write_to_file(file_name, dump_type[args.dump])
logging.info("Dump file '{0}' successfully created".format(file_name))
sys.exit()
logging.info("Start pushing metrics to CloudWatch.")
try:
cw_client = boto3.client('cloudwatch', region_name=aws_region)
# Split imported metrics list in chunks,
# since only 20/PutMetricData per request is allowed
for chunk in chunks(cw_metrics_data, 20):
cw_client.put_metric_data(Namespace=namespace, MetricData=chunk)
except (BotoCoreError, ClientError) as e:
logging.error("Received error: {}".format(e), exc_info=True)
raise
logging.info("Metrics were successfully pushed to CloudWatch.")
if __name__ == "__main__":
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Custom Scalars plugin.
This plugin lets the user create scalars plots with custom run-tag combinations
by specifying regular expressions.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from google.protobuf import json_format
import numpy as np
import tensorflow as tf
from werkzeug import wrappers
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.custom_scalar import layout_pb2
from tensorboard.plugins.custom_scalar import metadata
from tensorboard.plugins.scalar import metadata as scalars_metadata
from tensorboard.plugins.scalar import scalars_plugin
# The name of the property in the response for whether the regex is valid.
_REGEX_VALID_PROPERTY = 'regex_valid'
# The name of the property in the response for the payload (tag to ScalarEvents
# mapping).
_TAG_TO_EVENTS_PROPERTY = 'tag_to_events'
# The number of seconds to wait in between checks for the config file specifying
# layout.
_CONFIG_FILE_CHECK_THROTTLE = 60
class CustomScalarsPlugin(base_plugin.TBPlugin):
"""CustomScalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._logdir = context.logdir
self._multiplexer = context.multiplexer
self._plugin_name_to_instance = context.plugin_name_to_instance
def _get_scalars_plugin(self):
"""Tries to get the scalars plugin.
Returns:
The scalars plugin. Or None if it is not yet registered.
"""
if scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance:
# The plugin is registered.
return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]
# The plugin is not yet registered.
return None
def get_plugin_apps(self):
return {
'/download_data': self.download_data_route,
'/layout': self.layout_route,
'/scalars': self.scalars_route,
}
def is_active(self):
"""This plugin is active if 2 conditions hold.
1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.
Returns: A boolean. Whether the plugin is active.
"""
if not self._multiplexer:
return False
scalars_plugin_instance = self._get_scalars_plugin()
if not (scalars_plugin_instance and
scalars_plugin_instance.is_active()):
return False
# This plugin is active if any run has a layout.
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
@wrappers.Request.application
def download_data_route(self, request):
run = request.args.get('run')
tag = request.args.get('tag')
response_format = request.args.get('format')
try:
body, mime_type = self.download_data_impl(run, tag, response_format)
except ValueError as e:
return http_util.Respond(
request=request,
content=str(e),
content_type='text/plain',
code=500)
return http_util.Respond(request, body, mime_type)
def download_data_impl(self, run, tag, response_format):
"""Provides a response for downloading scalars data for a data series.
Args:
run: The run.
tag: The specific tag.
response_format: A string. One of the values of the OutputFormat enum of
the scalar plugin.
Raises:
ValueError: If the scalars plugin is not registered.
Returns:
2 entities:
- A JSON object response body.
- A mime type (string) for the response.
"""
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /download_data. '
'The scalars plugin is oddly not registered.'))
body, mime_type = scalars_plugin_instance.scalars_impl(
tag, run, response_format)
return body, mime_type
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag regex and single run, return ScalarEvents.
This route takes 2 GET params:
run: A run string to find tags for.
tag: A string that is a regex used to find matching tags.
The response is a JSON object:
{
// Whether the regular expression is valid. Also false if empty.
regexValid: boolean,
// An object mapping tag name to a list of ScalarEvents.
payload: Object<string, ScalarEvent[]>,
}
"""
# TODO: return HTTP status code for malformed requests
tag_regex_string = request.args.get('tag')
run = request.args.get('run')
mime_type = 'application/json'
try:
body = self.scalars_impl(run, tag_regex_string)
except ValueError as e:
return http_util.Respond(
request=request,
content=str(e),
content_type='text/plain',
code=500)
# Produce the response.
return http_util.Respond(request, body, mime_type)
def scalars_impl(self, run, tag_regex_string):
"""Given a tag regex and single run, return ScalarEvents.
Args:
run: A run string.
tag_regex_string: A regular expression that captures portions of tags.
Raises:
ValueError: if the scalars plugin is not registered.
Returns:
A dictionary that is the JSON-able response.
"""
if not tag_regex_string:
# The user provided no regex.
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Construct the regex.
try:
regex = re.compile(tag_regex_string)
except re.error:
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Fetch the tags for the run. Filter for tags that match the regex.
run_to_data = self._multiplexer.PluginRunToTagToContent(
scalars_metadata.PLUGIN_NAME)
tag_to_data = None
try:
tag_to_data = run_to_data[run]
except KeyError:
# The run could not be found. Perhaps a configuration specified a run that
# TensorBoard has not read from disk yet.
payload = {}
if tag_to_data:
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /scalars. '
'The scalars plugin is oddly not registered.'))
form = scalars_plugin.OutputFormat.JSON
payload = {tag: scalars_plugin_instance.scalars_impl(tag, run, form)[0]
for tag in tag_to_data.keys()
if regex.match(tag)}
return {
_REGEX_VALID_PROPERTY: True,
_TAG_TO_EVENTS_PROPERTY: payload,
}
@wrappers.Request.application
def layout_route(self, request):
r"""Fetches the custom layout specified by the config file in the logdir.
If more than 1 run contains a layout, this method merges the layouts by
merging charts within individual categories. If 2 categories with the same
name are found, the charts within are merged. The merging is based on the
order of the runs to which the layouts are written.
The response is a JSON object mirroring properties of the Layout proto if a
layout for any run is found.
The response is an empty object if no layout could be found.
"""
body = self.layout_impl()
return http_util.Respond(request, body, 'application/json')
def layout_impl(self):
    # Keep a mapping between title and category so we do not create duplicate
# categories.
title_to_category = {}
merged_layout = None
runs = list(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
runs.sort()
for run in runs:
tensor_events = self._multiplexer.Tensors(
run, metadata.CONFIG_SUMMARY_TAG)
# This run has a layout. Merge it with the ones currently found.
string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
content = np.asscalar(string_array)
layout_proto = layout_pb2.Layout()
layout_proto.ParseFromString(tf.compat.as_bytes(content))
if merged_layout:
# Append the categories within this layout to the merged layout.
for category in layout_proto.category:
if category.title in title_to_category:
# A category with this name has been seen before. Do not create a
# new one. Merge their charts.
title_to_category[category.title].chart.extend(category.chart)
else:
# This category has not been seen before.
merged_layout.category.add().MergeFrom(category)
title_to_category[category.title] = category
else:
# This is the first layout encountered.
merged_layout = layout_proto
for category in layout_proto.category:
title_to_category[category.title] = category
if merged_layout:
return json_format.MessageToJson(
merged_layout, including_default_value_fields=True)
else:
# No layout was found.
return {}
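# Illustrative sketch (not part of this plugin): the layout merged above is
# typically produced on the training side as a layout_pb2.Layout proto and
# logged once under metadata.CONFIG_SUMMARY_TAG, e.g. roughly:
#
#     layout_pb2.Layout(category=[
#         layout_pb2.Category(title='losses', chart=[
#             layout_pb2.Chart(title='all losses',
#                              multiline=layout_pb2.MultilineChartContent(
#                                  tag=[r'loss.*'])),
#         ]),
#     ])
#
# The category title and the tag regex here are hypothetical placeholders.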
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Holds all the information relevant to the client (addresses for instance)
"""
from six import with_metaclass
from django.db import models
from django.template import Context
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from shop import deferred
from shop.conf import app_settings
class AddressManager(models.Manager):
def get_max_priority(self, customer):
aggr = self.get_queryset().filter(customer=customer).aggregate(models.Max('priority'))
priority = aggr['priority__max'] or 0
return priority
def get_fallback(self, customer):
"""
Return a fallback address, whenever the customer has not declared one.
"""
return self.get_queryset().filter(customer=customer).order_by('priority').last()
class BaseAddress(models.Model):
customer = deferred.ForeignKey('BaseCustomer')
priority = models.SmallIntegerField(
default=0,
db_index=True,
help_text=_("Priority for using this address"),
)
class Meta:
abstract = True
objects = AddressManager()
def as_text(self):
"""
Return the address as plain text to be used for printing, etc.
"""
template_names = [
'{}/{}-address.txt'.format(app_settings.APP_LABEL, self.address_type),
'{}/address.txt'.format(app_settings.APP_LABEL),
'shop/address.txt',
]
template = select_template(template_names)
context = Context({'address': self})
return template.render(context)
as_text.short_description = _("Address")
class BaseShippingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'shipping'
class Meta:
abstract = True
ShippingAddressModel = deferred.MaterializedModel(BaseShippingAddress)
class BaseBillingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'billing'
class Meta:
abstract = True
BillingAddressModel = deferred.MaterializedModel(BaseBillingAddress)
ISO_3166_CODES = (
('AF', _("Afghanistan")),
('AX', _("Aland Islands")),
('AL', _("Albania")),
('DZ', _("Algeria")),
('AS', _("American Samoa")),
('AD', _("Andorra")),
('AO', _("Angola")),
('AI', _("Anguilla")),
('AQ', _("Antarctica")),
('AG', _("Antigua And Barbuda")),
('AR', _("Argentina")),
('AM', _("Armenia")),
('AW', _("Aruba")),
('AU', _("Australia")),
('AT', _("Austria")),
('AZ', _("Azerbaijan")),
('BS', _("Bahamas")),
('BH', _("Bahrain")),
('BD', _("Bangladesh")),
('BB', _("Barbados")),
('BY', _("Belarus")),
('BE', _("Belgium")),
('BZ', _("Belize")),
('BJ', _("Benin")),
('BM', _("Bermuda")),
('BT', _("Bhutan")),
('BO', _("Bolivia, Plurinational State Of")),
('BQ', _("Bonaire, Saint Eustatius And Saba")),
('BA', _("Bosnia And Herzegovina")),
('BW', _("Botswana")),
('BV', _("Bouvet Island")),
('BR', _("Brazil")),
('IO', _("British Indian Ocean Territory")),
('BN', _("Brunei Darussalam")),
('BG', _("Bulgaria")),
('BF', _("Burkina Faso")),
('BI', _("Burundi")),
('KH', _("Cambodia")),
('CM', _("Cameroon")),
('CA', _("Canada")),
('CV', _("Cape Verde")),
('KY', _("Cayman Islands")),
('CF', _("Central African Republic")),
('TD', _("Chad")),
('CL', _("Chile")),
('CN', _("China")),
('CX', _("Christmas Island")),
('CC', _("Cocos (Keeling) Islands")),
('CO', _("Colombia")),
('KM', _("Comoros")),
('CG', _("Congo")),
('CD', _("Congo, The Democratic Republic Of The")),
('CK', _("Cook Islands")),
('CR', _("Costa Rica")),
('HR', _("Croatia")),
('CU', _("Cuba")),
('CW', _("Curacao")),
('CY', _("Cyprus")),
('CZ', _("Czech Republic")),
('DK', _("Denmark")),
('DJ', _("Djibouti")),
('DM', _("Dominica")),
('DO', _("Dominican Republic")),
('EC', _("Ecuador")),
('EG', _("Egypt")),
('SV', _("El Salvador")),
('GQ', _("Equatorial Guinea")),
('ER', _("Eritrea")),
('EE', _("Estonia")),
('ET', _("Ethiopia")),
('FK', _("Falkland Islands (Malvinas)")),
('FO', _("Faroe Islands")),
('FJ', _("Fiji")),
('FI', _("Finland")),
('FR', _("France")),
('GF', _("French Guiana")),
('PF', _("French Polynesia")),
('TF', _("French Southern Territories")),
('GA', _("Gabon")),
('GM', _("Gambia")),
('DE', _("Germany")),
('GH', _("Ghana")),
('GI', _("Gibraltar")),
('GR', _("Greece")),
('GL', _("Greenland")),
('GD', _("Grenada")),
('GP', _("Guadeloupe")),
('GU', _("Guam")),
('GT', _("Guatemala")),
('GG', _("Guernsey")),
('GN', _("Guinea")),
('GW', _("Guinea-Bissau")),
('GY', _("Guyana")),
('HT', _("Haiti")),
('HM', _("Heard Island and McDonald Islands")),
('VA', _("Holy See (Vatican City State)")),
('HN', _("Honduras")),
('HK', _("Hong Kong")),
('HU', _("Hungary")),
('IS', _("Iceland")),
('IN', _("India")),
('ID', _("Indonesia")),
('IR', _("Iran, Islamic Republic Of")),
('IQ', _("Iraq")),
('IE', _("Ireland")),
('IL', _("Israel")),
('IT', _("Italy")),
('CI', _("Ivory Coast")),
('JM', _("Jamaica")),
('JP', _("Japan")),
('JE', _("Jersey")),
('JO', _("Jordan")),
('KZ', _("Kazakhstan")),
('KE', _("Kenya")),
('KI', _("Kiribati")),
('KP', _("Korea, Democratic People's Republic Of")),
('KR', _("Korea, Republic Of")),
('KS', _("Kosovo")),
('KW', _("Kuwait")),
('KG', _("Kyrgyzstan")),
('LA', _("Lao People's Democratic Republic")),
('LV', _("Latvia")),
('LB', _("Lebanon")),
('LS', _("Lesotho")),
('LR', _("Liberia")),
('LY', _("Libyan Arab Jamahiriya")),
('LI', _("Liechtenstein")),
('LT', _("Lithuania")),
('LU', _("Luxembourg")),
('MO', _("Macao")),
('MK', _("Macedonia")),
('MG', _("Madagascar")),
('MW', _("Malawi")),
('MY', _("Malaysia")),
('MV', _("Maldives")),
('ML', _("Mali")),
('ML', _("Malta")),
('MH', _("Marshall Islands")),
('MQ', _("Martinique")),
('MR', _("Mauritania")),
('MU', _("Mauritius")),
('YT', _("Mayotte")),
('MX', _("Mexico")),
('FM', _("Micronesia")),
('MD', _("Moldova")),
('MC', _("Monaco")),
('MN', _("Mongolia")),
('ME', _("Montenegro")),
('MS', _("Montserrat")),
('MA', _("Morocco")),
('MZ', _("Mozambique")),
('MM', _("Myanmar")),
('NA', _("Namibia")),
('NR', _("Nauru")),
('NP', _("Nepal")),
('NL', _("Netherlands")),
('AN', _("Netherlands Antilles")),
('NC', _("New Caledonia")),
('NZ', _("New Zealand")),
('NI', _("Nicaragua")),
('NE', _("Niger")),
('NG', _("Nigeria")),
('NU', _("Niue")),
('NF', _("Norfolk Island")),
('MP', _("Northern Mariana Islands")),
('NO', _("Norway")),
('OM', _("Oman")),
('PK', _("Pakistan")),
('PW', _("Palau")),
('PS', _("Palestinian Territory, Occupied")),
('PA', _("Panama")),
('PG', _("Papua New Guinea")),
('PY', _("Paraguay")),
('PE', _("Peru")),
('PH', _("Philippines")),
('PN', _("Pitcairn")),
('PL', _("Poland")),
('PT', _("Portugal")),
('PR', _("Puerto Rico")),
('QA', _("Qatar")),
('RE', _("Reunion")),
('RO', _("Romania")),
('RU', _("Russian Federation")),
('RW', _("Rwanda")),
('BL', _("Saint Barthelemy")),
('SH', _("Saint Helena, Ascension & Tristan Da Cunha")),
('KN', _("Saint Kitts and Nevis")),
('LC', _("Saint Lucia")),
('MF', _("Saint Martin (French Part)")),
('PM', _("Saint Pierre and Miquelon")),
('VC', _("Saint Vincent And The Grenadines")),
('WS', _("Samoa")),
('SM', _("San Marino")),
('ST', _("Sao Tome And Principe")),
('SA', _("Saudi Arabia")),
('SN', _("Senegal")),
('RS', _("Serbia")),
('SC', _("Seychelles")),
('SL', _("Sierra Leone")),
('SG', _("Singapore")),
('SX', _("Sint Maarten (Dutch Part)")),
('SK', _("Slovakia")),
('SI', _("Slovenia")),
('SB', _("Solomon Islands")),
('SO', _("Somalia")),
('ZA', _("South Africa")),
('GS', _("South Georgia And The South Sandwich Islands")),
('ES', _("Spain")),
('LK', _("Sri Lanka")),
('SD', _("Sudan")),
('SR', _("Suriname")),
('SJ', _("Svalbard And Jan Mayen")),
('SZ', _("Swaziland")),
('SE', _("Sweden")),
('CH', _("Switzerland")),
('SY', _("Syrian Arab Republic")),
('TW', _("Taiwan")),
('TJ', _("Tajikistan")),
('TZ', _("Tanzania")),
('TH', _("Thailand")),
('TL', _("Timor-Leste")),
('TG', _("Togo")),
('TK', _("Tokelau")),
('TO', _("Tonga")),
('TT', _("Trinidad and Tobago")),
('TN', _("Tunisia")),
('TR', _("Turkey")),
('TM', _("Turkmenistan")),
('TC', _("Turks And Caicos Islands")),
('TV', _("Tuvalu")),
('UG', _("Uganda")),
('UA', _("Ukraine")),
('AE', _("United Arab Emirates")),
('GB', _("United Kingdom")),
('US', _("United States")),
('UM', _("United States Minor Outlying Islands")),
('UY', _("Uruguay")),
('UZ', _("Uzbekistan")),
('VU', _("Vanuatu")),
('VE', _("Venezuela, Bolivarian Republic Of")),
('VN', _("Viet Nam")),
('VG', _("Virgin Islands, British")),
('VI', _("Virgin Islands, U.S.")),
('WF', _("Wallis and Futuna")),
('EH', _("Western Sahara")),
('YE', _("Yemen")),
('ZM', _("Zambia")),
('ZW', _("Zimbabwe")),
)
class CountryField(models.CharField):
"""
This creates a simple input field to choose a country.
"""
def __init__(self, *args, **kwargs):
defaults = {
'max_length': 3,
'choices': ISO_3166_CODES,
}
defaults.update(kwargs)
super(CountryField, self).__init__(*args, **defaults)
def deconstruct(self):
name, path, args, kwargs = super(CountryField, self).deconstruct()
if kwargs['max_length'] == 3:
kwargs.pop('max_length')
if kwargs['choices'] == ISO_3166_CODES:
kwargs.pop('choices')
return name, path, args, kwargs
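# Illustrative sketch only (not shipped with this module): a project-level
# shipping address model could combine the abstract base classes above with
# CountryField, e.g. roughly:
#
#     class ShippingAddress(BaseShippingAddress):
#         name = models.CharField(_("Full name"), max_length=1024)
#         city = models.CharField(_("City"), max_length=1024)
#         country = CountryField(_("Country"))
#
# The field names here are hypothetical placeholders.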
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import get_model
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from .base import OrganizationBase, OrganizationUserBase, OrganizationOwnerBase
from .signals import user_added, user_removed, owner_changed
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
ORGS_SLUGFIELD = getattr(settings, 'ORGS_SLUGFIELD',
'django_extensions.db.fields.AutoSlugField')
ORGS_TIMESTAMPED_MODEL = getattr(settings, 'ORGS_TIMESTAMPED_MODEL',
'django_extensions.db.models.TimeStampedModel')
ERR_MSG = """You may need to install django-extensions or similar library. See
the documentation."""
try:
module, klass = ORGS_SLUGFIELD.rsplit('.', 1)
SlugField = getattr(import_module(module), klass)
except:
raise ImproperlyConfigured("Your SlugField class, '{0}',"
" is improperly defined. {1}".format(ORGS_SLUGFIELD, ERR_MSG))
try:
module, klass = ORGS_TIMESTAMPED_MODEL.rsplit('.', 1)
TimeStampedModel = getattr(import_module(module), klass)
except:
raise ImproperlyConfigured("Your TimeStampedBaseModel class, '{0}',"
" is improperly defined. {1}".format(ORGS_TIMESTAMPED_MODEL, ERR_MSG))
def get_user_model():
"""
Returns the chosen user model as a class. This functionality won't be
builtin until Django 1.5.
"""
try:
klass = get_model(USER_MODEL.split('.')[0], USER_MODEL.split('.')[1])
except:
raise ImproperlyConfigured("Your AUTH_USER_MODEL class '{0}'"
" is improperly defined".format(USER_MODEL))
if klass is None:
raise ImproperlyConfigured("Your AUTH_USER_MODEL class '{0}'"
" is not installed".format(USER_MODEL))
return klass
class Organization(OrganizationBase, TimeStampedModel):
"""
Default Organization model.
"""
slug = SlugField(max_length=200, blank=False, editable=True,
populate_from='name', unique=True,
help_text=_("The name in all lowercase, suitable for URL identification"))
class Meta(OrganizationBase.Meta):
verbose_name = _("organization")
verbose_name_plural = _("organizations")
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('organization_detail', kwargs={'organization_pk': self.pk})
def add_user(self, user, is_admin=False):
"""
Adds a new user and if the first user makes the user an admin and
the owner.
"""
users_count = self.users.all().count()
if users_count == 0:
is_admin = True
# TODO get specific org user?
org_user = OrganizationUser.objects.create(user=user,
organization=self, is_admin=is_admin)
if users_count == 0:
# TODO get specific org user?
OrganizationOwner.objects.create(organization=self,
organization_user=org_user)
# User added signal
user_added.send(sender=self, user=user)
return org_user
def remove_user(self, user):
"""
Deletes a user from an organization.
"""
org_user = OrganizationUser.objects.get(user=user,
organization=self)
org_user.delete()
# User removed signal
user_removed.send(sender=self, user=user)
def get_or_add_user(self, user, **kwargs):
"""
Adds a new user to the organization, and if it's the first user makes
the user an admin and the owner. Uses the `get_or_create` method to
create or return the existing user.
`user` should be a user instance, e.g. `auth.User`.
Returns the same tuple as the `get_or_create` method, the
`OrganizationUser` and a boolean value indicating whether the
OrganizationUser was created or not.
"""
is_admin = kwargs.pop('is_admin', False)
users_count = self.users.all().count()
if users_count == 0:
is_admin = True
org_user, created = OrganizationUser.objects.get_or_create(
organization=self, user=user, defaults={'is_admin': is_admin})
if users_count == 0:
OrganizationOwner.objects.create(organization=self,
organization_user=org_user)
if created:
# User added signal
user_added.send(sender=self, user=user)
return org_user, created
def change_owner(self, new_owner):
"""
Changes ownership of an organization.
"""
old_owner = self.owner.organization_user
self.owner.organization_user = new_owner
self.owner.save()
# Owner changed signal
owner_changed.send(sender=self, old=old_owner, new=new_owner)
def is_admin(self, user):
"""
        Returns True if the user is an admin in the organization, otherwise False
"""
return True if self.organization_users.filter(user=user, is_admin=True) else False
def is_owner(self, user):
"""
        Returns True if the user is the organization's owner, otherwise False
"""
return self.owner.organization_user.user == user
class OrganizationUser(OrganizationUserBase, TimeStampedModel):
is_admin = models.BooleanField(default=False)
class Meta(OrganizationUserBase.Meta):
verbose_name = _("organization user")
verbose_name_plural = _("organization users")
def __unicode__(self):
return u"{0} ({1})".format(self.name if self.user.is_active else
self.user.email, self.organization.name)
def delete(self, using=None):
"""
If the organization user is also the owner, this should not be deleted
unless it's part of a cascade from the Organization.
If there is no owner then the deletion should proceed.
"""
from organizations.exceptions import OwnershipRequired
try:
if self.organization.owner.organization_user.id == self.id:
raise OwnershipRequired(_("Cannot delete organization owner "
"before organization or transferring ownership."))
# TODO This line presumes that OrgOwner model can't be modified
except OrganizationOwner.DoesNotExist:
pass
super(OrganizationUserBase, self).delete(using=using)
def get_absolute_url(self):
return reverse('organization_user_detail', kwargs={
'organization_pk': self.organization.pk, 'user_pk': self.user.pk})
class OrganizationOwner(OrganizationOwnerBase, TimeStampedModel):
class Meta:
verbose_name = _("organization owner")
verbose_name_plural = _("organization owners")
def save(self, *args, **kwargs):
"""
Extends the default save method by verifying that the chosen
organization user is associated with the organization.
Method validates against the primary key of the organization because
when validating an inherited model it may be checking an instance of
        `Organization` against an instance of `CustomOrganization`. Multi-table
        inheritance means the database keys will be identical though.
"""
from organizations.exceptions import OrganizationMismatch
if self.organization_user.organization.pk != self.organization.pk:
raise OrganizationMismatch
else:
super(OrganizationOwnerBase, self).save(*args, **kwargs)
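# Illustrative usage sketch (variable names are hypothetical; such calls would
# normally live in views or tests rather than in this module):
#
#     org = Organization.objects.create(name="Acme")
#     org_user = org.add_user(first_user)      # first user becomes admin and owner
#     org_user, created = org.get_or_add_user(second_user, is_admin=True)
#     org.change_owner(org_user)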
|
|
"""
Atom-based Bootstrap Embedding
"""
import os
import time
import numpy as np
import multiprocessing as mtproc
import itertools as its
from frankenstein.be import BE
from frankenstein.pyscf_be.pysd_atom import pySDATOM
from frankenstein.tools.io_utils import prtvar
"""
Methods for setting up fragments/getting initial guess
"""
def set_be_bad_cons(mb, A, part):
if len(mb.bad_con) > 0: # has already been set
return
for Bs, con in zip(part.matchfraglist[A], part.matchsitelist[A]):
conindA = [mb.msd.fragsites.index(i) for i in con]
if len(con) == 2:
if conindA[0] != conindA[1]:
i,j = conindA
mb.bad_con.append([[i,j], [j,i]])
else:
mb.bad_con.append([conindA])
elif len(con) == 4:
raise ValueError("Currently BEatom does not support 2e-constraints.")
else:
raise ValueError("Unknown constraints: len(con) =", len(con))
def get_nmatch_tot(mbs):
return sum([mb.ncon for mb in mbs])
def set_init_guess(mbatm):
u0 = None
if mbatm.u0 is None:
pass
elif isinstance(mbatm.u0, str):
u0_is_valid = True
if not os.path.isfile(mbatm.u0):
u0_is_valid = False
try:
u0 = np.loadtxt(mbatm.u0)
        except Exception:
u0_is_valid = False
if not u0_is_valid:
if mbatm.verbose > 1:
print("[WARNING] Input match pot file is not valid. "
"Discarded.\n", flush=True)
elif isinstance(mbatm.u0, np.ndarray):
u0 = mbatm.u0
else:
if mbatm.verbose > 1:
print("[WARNING] Input match pot is not valid. Discarded.\n", flush=True)
    if u0 is not None:
        if u0.ndim != 1:
            u0 = None
else:
mbatm.u0_status = "input"
if u0 is None:
mbatm.u0_status = "zeros"
mbatm.u = u0
def distribute_be_match_pot(mbatm, mbs, u=None):
nmatch_tot = get_nmatch_tot(mbs)
if u is None: u = mbatm.u
if u is None or u.size != nmatch_tot: return
istart = 0
for mb in mbs:
mb.u0 = u[istart:istart+mb.ncon].copy()
istart += mb.ncon
"""
Methods for determining matching potentials
"""
def update_be_good_cons(mb, A, part, good_rdm1s):
if good_rdm1s is None or len(good_rdm1s) == 0:
return
mb.good_con = []
for Bs, con in zip(part.matchfraglist[A], part.matchsitelist[A]):
good_val = 0.
fac = 1. / float(len(Bs))
for B in Bs:
if len(con) == 2:
i,j = [part.get_fragsites(B).index(k) for k in con]
good_val += good_rdm1s[B][i,j] * fac
elif len(con) == 4:
raise ValueError("Currently BEatom does not support 2e-constraints.")
else:
raise ValueError("Unknown constraints: len(con) =", len(con))
mb.good_con.append(good_val)
def determine_be_match_pot_frag(mb, A, part, mu, verbose=None):
# add chempot
chempot = get_chempot_mat(mb, A, part, mu)
# solve BE for fragment A self-consistently
if not verbose is None:
verbose_ = mb.verbose
mb.verbose = verbose
mb.heff_extra = chempot
mb.skip_postprocessing = True # turn off post processing
mb.kernel()
if not verbose is None:
mb.verbose = verbose_
mb.skip_postprocessing = False
if mb.u.size == 0:
mb.u = np.zeros(len(mb.bad_con))
mb.heff_extra = None
return mb.u, mb.fc_tot, mb.jc_tot
def determine_be_match_pot_frag_(A):
pass
def determine_be_match_pot(mbatm, mbs):
part = mbatm.msdatm.part
mu = mbatm.mu
good_rdm1s = mbatm.good_rdm1s
if mbatm.nproc == 1:
ulist = []
fc_tot = 0
jc_tot = 0
for A, mb in enumerate(mbs):
update_be_good_cons(mb, A, part, good_rdm1s)
u, fc, jc = determine_be_match_pot_frag(mb, A, part, mu)
ulist.append(u)
fc_tot += fc
jc_tot += jc
else:
for A, mb in enumerate(mbs):
update_be_good_cons(mb, A, part, good_rdm1s)
global determine_be_match_pot_frag_
def determine_be_match_pot_frag_(A):
start = time.time()
ret = determine_be_match_pot_frag(mbs[A], A, part, mu, verbose=0)
end = time.time()
if mbatm.verbose > 1:
print("frag %d/%d done in %.3f sec" %
(A, part.nfrag, end-start), flush=True)
return ret
nsite_by_frag = [frag.nsite for frag in part.fraglist]
run_order = np.argsort(nsite_by_frag)[::-1].tolist()
with mtproc.Pool(mbatm.nproc) as pool:
res = pool.map(determine_be_match_pot_frag_, run_order)
ulist = [res[run_order.index(A)][0] for A in range(part.nfrag)]
for A in range(part.nfrag):
mbs[A].u = ulist[A]
fc_tot = sum([res[A][1] for A in range(part.nfrag)])
jc_tot = sum([res[A][2] for A in range(part.nfrag)])
mbatm.u = np.concatenate(ulist)
mbatm.fcs_tot.append(fc_tot)
mbatm.jcs_tot.append(jc_tot)
def collect_be_match_pot(mbatm, mbs):
mbatm.u = np.concatenate([mb.u for mb in mbs])
def solve_impurity_mol(mb, A, part, mu, rdm_level=0):
# add chempot
chempot = get_chempot_mat(mb, A, part, mu)
# one-shot
mb.heff_extra = chempot
mc = mb.solve_impurity(rdm_level=1)
mb.heff_extra = None
rdm1 = mb.msd.make_rdm1(mc) if rdm_level > 0 else None
rdm2 = mb.msd.make_rdm2(mc) if rdm_level > 1 else None
return rdm1, rdm2
"""
Methods for determining chemical potential
"""
def get_chempot_mat(mb, A, part, mu):
chempot = np.zeros([mb.msd.nsao]*2)
cpdiag = []
for ind_in_A,ind in enumerate(part.fraglist[A].indlist):
val = 1. if A in part.centerlist[ind] else 0.
cpdiag += [val] * len(part.fragsitelist[A][ind_in_A])
chempot[:mb.msd.nf,:mb.msd.nf] = -mu*np.diag(cpdiag)
return chempot
def get_be_mol_nelec_frag_(A):
pass
def get_be_mol_nelec(mulist, mbs, part, nproc, verbose):
if isinstance(mulist, float):
mulist = [mulist]
nmu = len(mulist)
if nproc == 1:
neleclist = np.zeros_like(mulist)
for i,mu in enumerate(mulist):
nelecs = np.zeros(part.nmotif)
for A, mb in enumerate(mbs):
rdm1, rdm2 = solve_impurity_mol(mb, A, part, mu, rdm_level=1)
# collect contributions of fragment A to each motif
part.get_property_by_motif(A, np.diag(rdm1), nelecs)
rdm1 = None
neleclist[i] = np.sum(nelecs)
else:
global get_be_mol_nelec_frag_
def get_be_mol_nelec_frag_(A, mu):
nelecs_A = np.zeros(part.nmotif)
rdm1, rdm2 = solve_impurity_mol(mbs[A], A, part, mu, rdm_level=1)
part.get_property_by_motif(A, np.diag(rdm1), nelecs_A)
rdm1 = None
return nelecs_A
nsite_by_frag = [frag.nsite for frag in part.fraglist]
run_order = np.argsort(nsite_by_frag)[::-1].tolist()
with mtproc.Pool(nproc) as pool:
res = pool.starmap(get_be_mol_nelec_frag_,
its.product(run_order,mulist))
neleclist = [np.sum(sum(res[i::nmu])) for i in range(nmu)]
    if verbose > 4 and nproc == 1:
        # nelecs (per-motif occupations) is only accumulated in the serial path
        prtvar("nocc by motif", nelecs, "{:.3f}")
# return scalar if input is scalar
if nmu == 1: neleclist = neleclist[0]
return neleclist
from frankenstein.be.bemol_utils import chempot_solver
def determine_chempot(mbatm, mbs):
nproc = mbatm.nproc
nocc = mbatm.nocc
part = mbatm.msdatm.part
method = mbatm.chempot_method
bracket = mbatm.chempot_bracket
max_iter = mbatm.chempot_max_iter
thresh = 10.**-mbatm.chempot_conv
verbose = mbatm.verbose
args = (mbs, part, nproc, verbose, )
mbatm.mu = chempot_solver(nocc, get_be_mol_nelec, args, method, bracket,
max_iter, thresh, verbose)
def get_be_mol_obj_frag_(A):
pass
def get_be_mol_obj(mbatm, mbs):
part = mbatm.msdatm.part
mu = mbatm.mu
verbose = mbatm.verbose
if mbatm.nproc == 1:
ebe_ps = np.zeros([part.nmotif])
erhf_ps = np.zeros([part.nmotif])
mbatm.good_rdm1s = [None] * len(mbs)
for A, mb in enumerate(mbs):
# @@HY: here we request rdm2 since we need to evaluate energy
mc_rdm1, mc_rdm2 = solve_impurity_mol(mb, A, part, mu, rdm_level=2)
# update good_rdms
nfs = mb.msd.nf
if mbatm.debug_mode == 2:
mbatm.good_rdm1s[A] = mc_rdm1
else:
mbatm.good_rdm1s[A] = mc_rdm1[:nfs,:nfs].copy()
# evaluate dmet fragment energy
if mbatm.e_rhf_permotif is None: # only needs to compute once
e1A, e2A, e12A, e1A_rhf, e2A_rhf, e12A_rhf = \
mb.msd.get_SD_energy(mc_rdm1, mc_rdm2, ret_rhf=True)
part.get_property_by_motif(A, e12A_rhf, erhf_ps)
else:
e1A, e2A, e12A = mb.msd.get_SD_energy(mc_rdm1, mc_rdm2)
mc_rdm2 = None
part.get_property_by_motif(A, e12A, ebe_ps)
else:
global get_be_mol_obj_frag_
def get_be_mol_obj_frag_(A):
start = time.time()
mb = mbs[A]
mc_rdm1, mc_rdm2 = solve_impurity_mol(mb, A, part, mu, rdm_level=2)
ebe_ps_A = np.zeros([part.nmotif])
if mbatm.e_rhf_permotif is None:
erhf_ps_A = np.zeros([part.nmotif])
e1A, e2A, e12A, e1A_rhf, e2A_rhf, e12A_rhf = \
mb.msd.get_SD_energy(mc_rdm1, mc_rdm2, ret_rhf=True)
part.get_property_by_motif(A, e12A_rhf, erhf_ps_A)
else:
erhf_ps_A = None
e1A, e2A, e12A = mb.msd.get_SD_energy(mc_rdm1, mc_rdm2)
nfs = mb.msd.nf
if mbatm.debug_mode == 2:
mc_rdm1_frags = mc_rdm1
else:
mc_rdm1_frags = mc_rdm1[:nfs,:nfs].copy()
mc_rdm2 = None
mc_rdm1 = None
part.get_property_by_motif(A, e12A, ebe_ps_A)
end = time.time()
if mbatm.verbose > 1:
print("frag %d/%d done in %.3f sec" %
(A, part.nfrag, end-start), flush=True)
return mc_rdm1_frags, ebe_ps_A, erhf_ps_A
nsite_by_frag = [frag.nsite for frag in part.fraglist]
run_order = np.argsort(nsite_by_frag)[::-1].tolist()
with mtproc.Pool(mbatm.nproc) as pool:
res = pool.map(get_be_mol_obj_frag_, run_order)
if mbatm.verbose > 1:
print(flush=True)
mbatm.good_rdm1s = [res[run_order.index(A)][0]
for A in range(part.nfrag)]
ebe_ps = sum([res[A][1] for A in range(part.nfrag)])
if mbatm.e_rhf_permotif is None:
erhf_ps = sum([res[A][2] for A in range(part.nfrag)])
# update energy
if mbatm.e_rhf_permotif is None:
mbatm.e_rhf_permotif = erhf_ps
mbatm.e_be_permotif = ebe_ps
e_be = np.sum(ebe_ps)
if not mbatm.e_be is None:
mbatm.e_err = e_be - mbatm.e_be
mbatm.e_be = e_be
# get target values and current estimates
curestfrags, curestinds = part.curestfraglist, part.curestsitelist
targetfrags, targetinds = part.targetfraglist, part.targetsitelist
nmatch = len(curestfrags)
curests = np.zeros([nmatch])
targets = np.zeros([nmatch])
for A, mb in enumerate(mbs):
# distribute computed rdms to the right place
for B in range(nmatch):
if curestfrags[B] == A:
con = curestinds[B]
if len(con) == 2:
i,j = con
curests[B] = mbatm.good_rdm1s[A][i,j]
elif len(con) == 4:
i,j,k,l = con
curests[B] = mc_rdm2[i,j,k,l]
else:
raise RuntimeError("Invalid con: {:s}".format(str(con)))
for con, C in zip(targetinds[B], targetfrags[B]):
if C == A:
fac = 1. / float(len(targetfrags[B]))
if len(con) == 2:
i,j = con
targets[B] += mbatm.good_rdm1s[A][i,j] * fac
elif len(con) == 4:
i,j,k,l = con
targets[B] += mc_rdm2[i,j,k,l] * fac
else:
raise RuntimeError("Invalid con: {:s}".format(str(con)))
# compute the loss function
mbatm.obj = curests - targets if len(curests) > 0 else np.array([0.])
mbatm.err = np.mean(mbatm.obj**2)**0.5
def update_density(mbatm, good_rdm1s_old):
verbose = mbatm.verbose
if good_rdm1s_old is None or mbatm.mixing_beta == 0:
return
elif mbatm.check_conv_any() and not mbatm.mix_always:
if verbose > 1:
print(" Approach convergence. Turn off density mixing temporariliy.\n", flush=True)
return
else:
beta = mbatm.mixing_beta * 0.1
if verbose > 1:
print(" mixing densities with beta = {:.2f}\n".format(beta),
flush=True)
for i in range(mbatm.msdatm.part.nfrag):
mbatm.good_rdm1s[i] = mbatm.good_rdm1s[i] * (1.-beta) + \
good_rdm1s_old[i] * beta
def next_step(mbatm):
mbs = mbatm.initialize_fragments()
if mbatm.iteration == 1:
mbatm.set_init_guess()
prtvar("frag init pot", mbatm.u0_status, "{:s}")
mbatm.distribute_be_match_pot(mbs)
if mbatm.verbose > 1:
print("\n>> Determine matching potentials...\n", flush=True)
start = time.time()
mbatm.determine_be_match_pot(mbs)
end = time.time()
mbatm.ts_match.append(end - start)
if mbatm.debug_mode == 3:
print("\n>> Debug mode: compute and save densities...\n", flush=True)
mbatm.get_be_mol_obj(mbs)
mbatm.save_density(prefix="nomu", first_time=mbatm.iteration==1)
if mbatm.verbose > 1:
print("\n>> Determine chemical potentials...\n", flush=True)
start = time.time()
mbatm.determine_chempot(mbs)
end = time.time()
mbatm.ts_chpot.append(end - start)
if mbatm.verbose > 1:
print("\n>> Compute energy and update densities...\n", flush=True)
if mbatm.opt.upper() == "FP" and mbatm.mixing_beta > 0:
good_rdm1s_old = mbatm.good_rdm1s
start = time.time()
mbatm.get_be_mol_obj(mbs)
end = time.time()
mbatm.ts_energ.append(end - start)
if mbatm.opt.upper() == "FP" and mbatm.mixing_beta > 0:
mbatm.update_density(good_rdm1s_old)
if mbatm.debug_mode:
if mbatm.verbose > 1:
print("\n>> Saving good rdm1s to file...\n", flush=True)
mbatm.save_density(
first_time=(mbatm.iteration==1 and mbatm.debug_mode < 3))
class FP:
"""Fixed point algorithm
"""
def __init__(self, prob):
self.prob = prob
self.comment = "FP"
def next_step(self):
self.prob.next_step()
return self.prob.check_conv()
def print_beatom(mbatm, mode):
l0 = 15
if mode == 0:
pass
elif mode == 1:
prtvar("iter", mbatm.iteration, "{:d}", l0=l0)
prtvar("mu", mbatm.mu, "{: .10f}", l0=l0)
prtvar("err", mbatm.err, "{: .5e}", l0=l0)
prtvar("e_err", mbatm.e_err, "{: .5e}", l0=l0)
prtvar("ebe", mbatm.e_be, "{: .10f}", l0=l0)
prtvar("etot", mbatm.e_tot, "{: .10f}", l0=l0)
prtvar("ecorr", mbatm.e_corr, "{: .10f}", l0=l0)
prtvar("ebe by motif", mbatm.e_be_permotif, "{:12.6f}", l0=l0)
prtvar("erhf by motif", mbatm.e_rhf_permotif, "{:12.6f}", l0=l0)
prtvar("ecorr by motif", mbatm.e_be_permotif-mbatm.e_rhf_permotif,
"{:12.6f}", l0=l0)
prtvar("func call (sol)", mbatm.fcs_tot[-1], "{:d}", l0=l0)
prtvar("func call (jac)", mbatm.jcs_tot[-1], "{:d}", l0=l0)
prtvar("t_wall (match)", mbatm.ts_match[-1], "{:.6f}", l0=l0)
prtvar("t_wall (chpot)", mbatm.ts_chpot[-1], "{:.6f}", l0=l0)
prtvar("t_wall (energ)", mbatm.ts_energ[-1], "{:.6f}", l0=l0)
prtvar("u", mbatm.u, "{: .10f}", l0=l0)
prtvar("obj", mbatm.obj, "{: .10f}", l0=l0)
print("", flush=True)
elif mode == 2:
iteration = mbatm.iteration
t_match = np.sum(mbatm.ts_match)
t_chpot = np.sum(mbatm.ts_chpot)
t_energ = np.sum(mbatm.ts_energ)
t_tot = t_match + t_chpot + t_energ
fc_tot = sum(mbatm.fcs_tot)
jc_tot = sum(mbatm.jcs_tot)
prtvar("Converged", mbatm.is_converged, None)
prtvar("# iteration", iteration, "{:d}")
prtvar("Matching error", mbatm.err, "{: .5e}")
prtvar("Energy change", mbatm.e_err, "{: .5e}")
prtvar("Final BE energy", mbatm.e_be, "{: .10f}")
prtvar("Final Total energy", mbatm.e_tot, "{: .10f}")
prtvar("Final Corr energy", mbatm.e_corr, "{: .10f}")
prtvar("t_wall (match)", [t_match, t_match/(iteration-1)
if iteration > 0 else 0.], "{:.6f}")
prtvar("total fc (solver)", ["{:d}".format(fc_tot),
"{:.1f}".format(fc_tot/float(iteration))], "{:s}")
prtvar("total fc (jac)", ["{:d}".format(jc_tot),
"{:.1f}".format(jc_tot/float(iteration))], "{:s}")
prtvar("t_wall (chpot)", [t_chpot, t_chpot/iteration], "{:.6f}")
prtvar("t_wall (energ)", [t_energ, t_energ/iteration], "{:.6f}")
prtvar("t_wall (total)", t_tot, "{:.6f}")
print("", flush=True)
else:
raise ValueError("Unknown mode in print_beatom.")
def postprocessing(mbatm):
for m in mbatm.msdatm:
m.delete_eri()
mbatm.msdatm.delete_erifile()
"""
Methods for DIIS
"""
def get_diis_errvec(mbatm):
return mbatm.obj
def get_diis_fockvec(mbatm):
return np.concatenate([rdm1.ravel() for rdm1 in mbatm.good_rdm1s])
def update_fock(mbatm, good_rdm1s_new):
nelem_expect = sum([rdm1.size for rdm1 in mbatm.good_rdm1s])
assert(nelem_expect == good_rdm1s_new.size)
ind = 0
for i in range(mbatm.msdatm.nfrag):
nelem_i = mbatm.good_rdm1s[i].size
shape_i = mbatm.good_rdm1s[i].shape
mbatm.good_rdm1s[i] = good_rdm1s_new[ind:ind+nelem_i].reshape(*shape_i)
ind += nelem_i
class BEatom:
"""Basic class for Atom-based Bootstrap Embedding
"""
def __init__(self, msdatm, **kwargs):
"""
"""
if not isinstance(msdatm, pySDATOM):
raise ValueError("msdatm must be a pySDATOM instance.")
self.msdatm = msdatm
self.nocc = self.msdatm.nocc
self.e_nuc = self.msdatm.pymf.energy_nuc()
# these properties can be set at initialization
# parallel
self.nproc = 1
# for BE macrocycle
self.verbose = self.msdatm.verbose
self.obj_conv = 5
self.e_conv = 4
self.tight_conv = False # if true, both obj and energy need to converge
self.mixing_beta = 3
self.mix_always = True
self.max_iter = 20
self.opt = "fp"
self.max_diis = 20
        self.debug_mode = False # if true, quantities like rdm1s are dumped
# for chemical potential
self.chempot_max_iter = 50
self.chempot_method = "quad"
self.chempot_bracket = [-0.01, 0.01]
self.chempot_conv = 6
# for each fragment
self.frag_obj_conv = 7
self.frag_du_conv = 9
self.frag_max_iter = 50
self.imp_sol = None
self.sol_params = None
self.jac = None
self.u0 = None
self.u0_status = None
self.B0 = None
self.__dict__.update(kwargs)
# don't touch these properties unless you know what you are doing
self.u = None
self.obj = None
self.mu = 0.
self.e_be_permotif = None
self.e_rhf_permotif = None
self.e_be = None
self.good_rdm1s = None
self.err = float("inf")
self.e_err = float("inf")
self.fcs_tot = []
self.jcs_tot = []
# print info
self.__str__()
def __str__(self):
if self.verbose > 0:
hstr = "BEatom : A class for atom-based Bootstrap Embedding"
hlen = len(hstr) + 4
print("=" * hlen, flush=True)
print(" {:s}".format(hstr), flush=True)
print("-" * (hlen), flush=True)
prtvar("nproc", self.nproc, "{:d}")
print("", flush=True)
prtvar("obj_conv", self.obj_conv, "{:d}")
prtvar("e_conv", self.e_conv, "{:d}")
prtvar("tight_conv", self.tight_conv, None)
prtvar("mixing_beta", self.mixing_beta, "{:d}")
prtvar("mix_always", self.mix_always, None)
prtvar("max_iter", self.max_iter, "{:d}")
prtvar("opt algorithm", self.opt, "{:s}")
if self.opt.upper() == "DIIS":
prtvar("DIIS subspace dim", self.max_diis, "{:d}")
print("", flush=True)
prtvar("frag obj_conv", self.frag_obj_conv, "{:d}")
prtvar("frag du_conv", self.frag_du_conv, "{:d}")
prtvar("frag max_iter", self.frag_max_iter, "{:d}")
prtvar("frag imp_sol", self.imp_sol, "{:s}")
print("-" * (hlen), flush=True)
print("", flush=True)
return ""
# properties
@property
def e_tot(self):
e_tot = self.e_be + self.e_nuc
if hasattr(self.msdatm, "Ecore"):
e_tot += self.msdatm.Ecore
return e_tot
@property
def e_corr(self):
return self.e_tot - self.msdatm.pymf.e_tot
# return np.sum(self.e_be_permotif) - np.sum(self.e_rhf_permotif)
# methods for kernel
set_init_guess = set_init_guess
distribute_be_match_pot = distribute_be_match_pot
determine_be_match_pot = determine_be_match_pot
collect_be_match_pot = collect_be_match_pot
determine_chempot = determine_chempot
get_be_mol_obj = get_be_mol_obj
update_density = update_density
next_step = next_step
print_beatom = print_beatom
postprocessing = postprocessing
# methods for diis
get_diis_errvec = get_diis_errvec
get_diis_fockvec = get_diis_fockvec
update_fock = update_fock
def initialize_fragments(self):
m = self.msdatm
mbs = []
for A in range(m.part.nfrag):
mb = BE(m[A],
verbose=self.verbose-1,
obj_conv=self.frag_obj_conv,
du_conv=self.frag_du_conv,
max_iter=self.frag_max_iter,
imp_sol=self.imp_sol,
sol_params=self.sol_params,
jac=self.jac,
B0=self.B0)
set_be_bad_cons(mb, A, m.part)
mbs.append(mb)
return mbs
def check_conv_any(self, err=None, e_err=None):
if err is None: err = self.err
if e_err is None: e_err = self.e_err
return err < 10**-self.obj_conv or abs(e_err) < 10**-self.e_conv
def check_conv(self, err=None, e_err=None):
if err is None: err = self.err
if e_err is None: e_err = self.e_err
flag_obj = err < 10**-self.obj_conv
flag_e = abs(e_err) < 10**-self.e_conv
if self.tight_conv:
# require both obj and energy being tightly converged
conv = flag_obj and flag_e
else:
# otherwise, require one being tightly converged, and the other
# "loosely" converged
flag_obj_loose = err < 10**-(self.obj_conv-1)
flag_e_loose = abs(e_err) < 10**-(self.e_conv-1)
conv = (flag_obj and flag_e_loose) or (flag_e and flag_obj_loose)
return conv
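    # Worked example of the criterion in check_conv above (illustrative, not
    # from the original source): with obj_conv=5 and e_conv=4, err=2e-6 and
    # e_err=5e-4 pass the default (loose) test since err < 1e-5 and
    # |e_err| < 1e-3, but fail with tight_conv=True because |e_err| >= 1e-4.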
def get_optimizer(self):
if self.opt.upper() == "FP":
return FP(self)
elif self.opt.upper() == "DIIS":
from frankenstein.optimizer.diis import DIIS_gen
return DIIS_gen(self, max_diis=self.max_diis, diis_start=1)
else:
raise ValueError("Unknown optimizer '{:s}'".format(str(self.opt)))
def kernel(self):
optimizer = self.get_optimizer()
self.ts_match = []
self.ts_chpot = []
self.ts_energ = []
self.fcs_tot = []
self.jcs_tot = []
self.iteration = None
self.e_be = None
self.is_converged = False
for self.iteration in range(1,self.max_iter+1):
if optimizer.next_step():
self.is_converged = True
# print out info
if self.verbose > 0:
self.print_beatom(1)
if self.check_conv():
self.is_converged = True
break
            # starting from the second iteration, the convergence check above is a must
flag = True
if self.verbose > 0:
self.print_beatom(2)
self.postprocessing()
def save_density(self, good_rdm1s=None, fname="be_debug.h5", prefix="",
first_time=False):
import h5py
if first_time and os.path.isfile(fname): os.remove(fname)
if good_rdm1s is None: good_rdm1s = self.good_rdm1s
with h5py.File(fname, "a") as f:
groupname = "%siter%d" % (prefix, self.iteration)
if groupname in f: del f[groupname]
for A in range(self.msdatm.part.nfrag):
f.create_dataset("%s/%d" % (groupname, A), data=good_rdm1s[A])
if __name__ == "__main__":
from frankenstein.tools.pyscf_utils import get_pymol
from pyscf import scf
from frankenstein.pyscf_be.pysd_atom import pySDATOM
# geom = "../../../../geom/benchmark/polyacene/geom/1.xyz"
# geom = "../../../../geom/benchmark/polyacetylene_E/opt/16.xyz"
# geom = "../../../../geom/benchmark/buckyball/20.xyz"
geom = "../../../../geom/benchmark/h-cluster/3d_20.xyz"
basis = "sto-3g"
natom_per_frag = 10
pymol = get_pymol(geom, basis)
pymf = scf.RHF(pymol)
start = time.time()
pymf.kernel()
end = time.time()
print("t scf: {:.6f}".format(end-start))
from pyscf import mp
pymp = mp.MP2(pymf)
pymp.kernel()
print(pymp.e_corr)
msdatm = pySDATOM(pymf, natom_per_frag, incore=True, nibath=False,
# fast_ao2mo=True, thr_bath=1.E-6,
matchtypelist=[["intra", "1epop"], ["inter", "1ecoh"]], frzcore=True, verbose=2,
by_dist=True, by_bond=False)
# matchtypelist=[["intra", "1epop"], ["intra", "1ecoh"], ["inter", "1ecoh"]], frzcore=True, verbose=2)
mbatm = BEatom(msdatm, imp_sol="mp2", B0="scf", mixing_beta=3, chempot_method="quad", opt="fp")
# mbatm = BEatom(msdatm, imp_sol="MP2", jac="lr", mixing_beta=3)
mbatm.kernel()
|
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import errno
import os
import socket
import sys
import six
from ._exceptions import *
from ._logging import *
from ._socket import *
from ._ssl_compat import *
from ._url import *
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
class proxy_info(object):
def __init__(self, **options):
self.host = options.get("http_proxy_host", None)
if self.host:
self.port = options.get("http_proxy_port", 0)
self.auth = options.get("http_proxy_auth", None)
self.no_proxy = options.get("http_no_proxy", None)
else:
self.port = 0
self.auth = None
self.no_proxy = None
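# Illustrative sketch (not part of the original source): constructing a
# proxy_info from the keyword options read in __init__ above; host, port,
# and credentials are hypothetical.
#
#     pinfo = proxy_info(http_proxy_host="proxy.example.com",
#                        http_proxy_port=3128,
#                        http_proxy_auth=("user", "secret"))
#     # pinfo.host / pinfo.port / pinfo.auth are then consumed by connect()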
def connect(url, options, proxy, socket):
hostname, port, resource, is_secure = parse_url(url)
if socket:
return socket, (hostname, port, resource)
addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
hostname, port, is_secure, proxy)
if not addrinfo_list:
raise WebSocketException(
"Host not found.: " + hostname + ":" + str(port))
sock = None
try:
sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
if need_tunnel:
sock = _tunnel(sock, hostname, port, auth)
if is_secure:
if HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
else:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
except:
if sock:
sock.close()
raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
phost, pport, pauth = get_proxy_info(
hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
if not phost:
addrinfo_list = socket.getaddrinfo(
hostname, port, 0, 0, socket.SOL_TCP)
return addrinfo_list, False, None
else:
        pport = pport or 80
addrinfo_list = socket.getaddrinfo(phost, pport, 0, 0, socket.SOL_TCP)
return addrinfo_list, True, pauth
def _open_socket(addrinfo_list, sockopt, timeout):
err = None
for addrinfo in addrinfo_list:
family = addrinfo[0]
sock = socket.socket(family)
sock.settimeout(timeout)
for opts in DEFAULT_SOCKET_OPTION:
sock.setsockopt(*opts)
for opts in sockopt:
sock.setsockopt(*opts)
address = addrinfo[4]
try:
sock.connect(address)
except socket.error as error:
error.remote_ip = str(address[0])
if error.errno in (errno.ECONNREFUSED, ):
err = error
continue
else:
raise
else:
break
else:
raise err
return sock
def _can_use_sni():
return six.PY2 and sys.version_info >= (2, 7, 9) or sys.version_info >= (3, 2)
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_SSLv23))
if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
context.load_verify_locations(cafile=sslopt.get('ca_certs', None), capath=sslopt.get('ca_cert_path', None))
if sslopt.get('certfile', None):
context.load_cert_chain(
sslopt['certfile'],
sslopt.get('keyfile', None),
sslopt.get('password', None),
)
# see
# https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
context.verify_mode = sslopt['cert_reqs']
if HAVE_CONTEXT_CHECK_HOSTNAME:
context.check_hostname = check_hostname
if 'ciphers' in sslopt:
context.set_ciphers(sslopt['ciphers'])
if 'cert_chain' in sslopt:
certfile, keyfile, password = sslopt['cert_chain']
context.load_cert_chain(certfile, keyfile, password)
return context.wrap_socket(
sock,
do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
server_hostname=hostname,
)
def _ssl_socket(sock, user_sslopt, hostname):
sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
sslopt.update(user_sslopt)
if os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE'):
certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
else:
certPath = os.path.join(
os.path.dirname(__file__), "cacert.pem")
if os.path.isfile(certPath) and user_sslopt.get('ca_cert', None) is None:
sslopt['ca_certs'] = certPath
elif os.path.isdir(certPath) and user_sslopt.get('ca_cert_path', None) is None:
sslopt['ca_cert_path'] = certPath
check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
'check_hostname', True)
if _can_use_sni():
sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
else:
sslopt.pop('check_hostname', True)
sock = ssl.wrap_socket(sock, **sslopt)
if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
match_hostname(sock.getpeercert(), hostname)
return sock
def _tunnel(sock, host, port, auth):
debug("Connecting proxy...")
connect_header = "CONNECT %s:%d HTTP/1.0\r\n" % (host, port)
# TODO: support digest auth.
if auth and auth[0]:
auth_str = auth[0]
if auth[1]:
auth_str += ":" + auth[1]
encoded_str = base64encode(auth_str.encode()).strip().decode()
connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
connect_header += "\r\n"
dump("request header", connect_header)
send(sock, connect_header)
try:
status, resp_headers = read_headers(sock)
except Exception as e:
raise WebSocketProxyException(str(e))
if status != 200:
raise WebSocketProxyException(
"failed CONNECT via proxy status: %r" % status)
return sock
def read_headers(sock):
status = None
headers = {}
trace("--- response header ---")
while True:
line = recv_line(sock)
line = line.decode('utf-8').strip()
if not line:
break
trace(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip()
else:
raise WebSocketException("Invalid header")
trace("-----------------------")
return status, headers
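# Illustrative sketch (not part of the original source): calling connect()
# directly. The `options` argument only needs the attributes read above
# (sockopt, timeout, sslopt); SimpleNamespace is used here purely for
# illustration and assumes Python 3.
#
#     import types
#     opts = types.SimpleNamespace(sockopt=[], timeout=10, sslopt={})
#     sock, (host, port, resource) = connect(
#         "wss://echo.example.org/", opts, proxy_info(), None)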
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: eval.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="eval.proto",
package="capnp.benchmark.protobuf",
syntax="proto2",
serialized_pb=_b(
'\n\neval.proto\x12\x18\x63\x61pnp.benchmark.protobuf"\xe5\x01\n\nExpression\x12/\n\x02op\x18\x01 \x02(\x0e\x32#.capnp.benchmark.protobuf.Operation\x12\x12\n\nleft_value\x18\x02 \x01(\x05\x12=\n\x0fleft_expression\x18\x03 \x01(\x0b\x32$.capnp.benchmark.protobuf.Expression\x12\x13\n\x0bright_value\x18\x04 \x01(\x05\x12>\n\x10right_expression\x18\x05 \x01(\x0b\x32$.capnp.benchmark.protobuf.Expression"!\n\x10\x45valuationResult\x12\r\n\x05value\x18\x01 \x02(\x11*I\n\tOperation\x12\x07\n\x03\x41\x44\x44\x10\x00\x12\x0c\n\x08SUBTRACT\x10\x01\x12\x0c\n\x08MULTIPLY\x10\x02\x12\n\n\x06\x44IVIDE\x10\x03\x12\x0b\n\x07MODULUS\x10\x04'
),
)
_OPERATION = _descriptor.EnumDescriptor(
name="Operation",
full_name="capnp.benchmark.protobuf.Operation",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="ADD", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SUBTRACT", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MULTIPLY", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DIVIDE", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MODULUS", index=4, number=4, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=307,
serialized_end=380,
)
_sym_db.RegisterEnumDescriptor(_OPERATION)
Operation = enum_type_wrapper.EnumTypeWrapper(_OPERATION)
ADD = 0
SUBTRACT = 1
MULTIPLY = 2
DIVIDE = 3
MODULUS = 4
_EXPRESSION = _descriptor.Descriptor(
name="Expression",
full_name="capnp.benchmark.protobuf.Expression",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="op",
full_name="capnp.benchmark.protobuf.Expression.op",
index=0,
number=1,
type=14,
cpp_type=8,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="left_value",
full_name="capnp.benchmark.protobuf.Expression.left_value",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="left_expression",
full_name="capnp.benchmark.protobuf.Expression.left_expression",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="right_value",
full_name="capnp.benchmark.protobuf.Expression.right_value",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="right_expression",
full_name="capnp.benchmark.protobuf.Expression.right_expression",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=41,
serialized_end=270,
)
_EVALUATIONRESULT = _descriptor.Descriptor(
name="EvaluationResult",
full_name="capnp.benchmark.protobuf.EvaluationResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="value",
full_name="capnp.benchmark.protobuf.EvaluationResult.value",
index=0,
number=1,
type=17,
cpp_type=1,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=272,
serialized_end=305,
)
_EXPRESSION.fields_by_name["op"].enum_type = _OPERATION
_EXPRESSION.fields_by_name["left_expression"].message_type = _EXPRESSION
_EXPRESSION.fields_by_name["right_expression"].message_type = _EXPRESSION
DESCRIPTOR.message_types_by_name["Expression"] = _EXPRESSION
DESCRIPTOR.message_types_by_name["EvaluationResult"] = _EVALUATIONRESULT
DESCRIPTOR.enum_types_by_name["Operation"] = _OPERATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Expression = _reflection.GeneratedProtocolMessageType(
"Expression",
(_message.Message,),
dict(
DESCRIPTOR=_EXPRESSION,
__module__="eval_pb2"
# @@protoc_insertion_point(class_scope:capnp.benchmark.protobuf.Expression)
),
)
_sym_db.RegisterMessage(Expression)
EvaluationResult = _reflection.GeneratedProtocolMessageType(
"EvaluationResult",
(_message.Message,),
dict(
DESCRIPTOR=_EVALUATIONRESULT,
__module__="eval_pb2"
# @@protoc_insertion_point(class_scope:capnp.benchmark.protobuf.EvaluationResult)
),
)
_sym_db.RegisterMessage(EvaluationResult)
# @@protoc_insertion_point(module_scope)
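# Illustrative usage sketch (appended by hand, not produced by protoc):
# building and round-tripping an Expression message via the generated
# protobuf message API.
#
#     expr = Expression(op=ADD, left_value=1, right_value=2)
#     data = expr.SerializeToString()
#     same = Expression.FromString(data)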
|
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import urllib
import xml.sax
import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import GSResponseError
from boto.exception import InvalidAclError
from boto.gs.acl import ACL, CannedACLStrings
from boto.gs.acl import SupportedPermissions as GSPermissions
from boto.gs.bucketlistresultset import VersionedBucketListResultSet
from boto.gs.cors import Cors
from boto.gs.lifecycle import LifecycleConfig
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
from boto.utils import get_utf8_value
from boto.compat import six
# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
STANDARD_ACL = 'acl'
CORS_ARG = 'cors'
LIFECYCLE_ARG = 'lifecycle'
STORAGE_CLASS_ARG = 'storageClass'
ERROR_DETAILS_REGEX = re.compile(r'<Details>(?P<details>.*)</Details>')
class Bucket(S3Bucket):
"""Represents a Google Cloud Storage bucket."""
StorageClassBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<StorageClass>%s</StorageClass>')
VersioningBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<VersioningConfiguration><Status>%s</Status>'
'</VersioningConfiguration>')
WebsiteBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<WebsiteConfiguration>%s%s</WebsiteConfiguration>')
WebsiteMainPageFragment = '<MainPageSuffix>%s</MainPageSuffix>'
WebsiteErrorFragment = '<NotFoundPage>%s</NotFoundPage>'
def __init__(self, connection=None, name=None, key_class=GSKey):
super(Bucket, self).__init__(connection, name, key_class)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'CreationDate':
self.creation_date = value
else:
setattr(self, name, value)
def get_key(self, key_name, headers=None, version_id=None,
response_headers=None, generation=None):
"""Returns a Key instance for an object in this bucket.
Note that this method uses a HEAD request to check for the existence of
the key.
:type key_name: string
:param key_name: The name of the key to retrieve
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/06N3b for details.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: A specific generation number to fetch the key at. If
not specified, the latest generation is fetched.
:rtype: :class:`boto.gs.key.Key`
:returns: A Key object from this bucket.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
if response_headers:
for rk, rv in six.iteritems(response_headers):
query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))
try:
key, resp = self._get_key_internal(key_name, headers,
query_args_l=query_args_l)
except GSResponseError as e:
if e.status == 403 and 'Forbidden' in e.reason:
# If we failed getting an object, let the user know which object
# failed rather than just returning a generic 403.
e.reason = ("Access denied to 'gs://%s/%s'." %
(self.name, key_name))
raise
return key
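    # Illustrative usage of get_key() (sketch, not from the original source);
    # the connection setup and object names are hypothetical:
    #
    #     conn = boto.connect_gs()
    #     bucket = conn.get_bucket('my-bucket')
    #     key = bucket.get_key('reports/2015.csv', generation=3)
    #     if key is not None:
    #         print(key.name)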
def copy_key(self, new_key_name, src_bucket_name, src_key_name,
metadata=None, src_version_id=None, storage_class='STANDARD',
preserve_acl=False, encrypt_key=False, headers=None,
query_args=None, src_generation=None):
"""Create a new key in the bucket by copying an existing key.
:type new_key_name: string
:param new_key_name: The name of the new key
:type src_bucket_name: string
:param src_bucket_name: The name of the source bucket
:type src_key_name: string
:param src_key_name: The name of the source key
:type src_generation: int
:param src_generation: The generation number of the source key to copy.
If not specified, the latest generation is copied.
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type version_id: string
:param version_id: Unused in this subclass.
:type storage_class: string
:param storage_class: The storage class of the new key. By
default, the new key will use the standard storage class.
Possible values are: STANDARD | DURABLE_REDUCED_AVAILABILITY
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to GCS, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL (or if you have a default ACL set
on the bucket), a value of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: Included for compatibility with S3. This argument is
ignored.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type query_args: string
:param query_args: A string of additional querystring arguments
to append to the request
:rtype: :class:`boto.gs.key.Key`
:returns: An instance of the newly created key object
"""
if src_generation:
headers = headers or {}
headers['x-goog-copy-source-generation'] = str(src_generation)
return super(Bucket, self).copy_key(
new_key_name, src_bucket_name, src_key_name, metadata=metadata,
storage_class=storage_class, preserve_acl=preserve_acl,
encrypt_key=encrypt_key, headers=headers, query_args=query_args)
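    # Illustrative usage of copy_key() (sketch, not from the original source);
    # bucket and key names are hypothetical:
    #
    #     bucket.copy_key('backup/report.csv', 'src-bucket', 'report.csv',
    #                     preserve_acl=True, src_generation=7)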
def list_versions(self, prefix='', delimiter='', marker='',
generation_marker='', headers=None):
"""
List versioned objects within a bucket. This returns an
        instance of a VersionedBucketListResultSet that automatically
handles all of the result paging, etc. from GCS. You just need
to keep iterating until there are no more results. Called
with no arguments, this will return an iterator object across
all keys within the bucket.
:type prefix: string
:param prefix: allows you to limit the listing to a particular
prefix. For example, if you call the method with
prefix='/foo/' then the iterator will only cycle through
the keys that begin with the string '/foo/'.
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
to allow you to organize and browse your keys
hierarchically. See:
https://developers.google.com/storage/docs/reference-headers#delimiter
for more details.
:type marker: string
:param marker: The "marker" of where you are in the result set
:type generation_marker: string
:param generation_marker: The "generation marker" of where you are in
the result set.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:rtype:
:class:`boto.gs.bucketlistresultset.VersionedBucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc.
"""
return VersionedBucketListResultSet(self, prefix, delimiter,
marker, generation_marker,
headers)
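    # Illustrative usage of list_versions() (sketch, not from the original
    # source); the prefix is hypothetical:
    #
    #     for version in bucket.list_versions(prefix='logs/'):
    #         print(version.name, version.generation)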
def validate_get_all_versions_params(self, params):
"""
See documentation in boto/s3/bucket.py.
"""
self.validate_kwarg_names(params,
['version_id_marker', 'delimiter', 'marker',
'generation_marker', 'prefix', 'max_keys'])
def delete_key(self, key_name, headers=None, version_id=None,
mfa_token=None, generation=None):
"""
Deletes a key from the bucket.
:type key_name: string
:param key_name: The key name to delete
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type version_id: string
:param version_id: Unused in this subclass.
:type mfa_token: tuple or list of strings
:param mfa_token: Unused in this subclass.
:type generation: int
:param generation: The generation number of the key to delete. If not
specified, the latest generation number will be deleted.
:rtype: :class:`boto.gs.key.Key`
:returns: A key object holding information on what was
deleted.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
        return self._delete_key_internal(key_name, headers=headers,
                                         version_id=version_id,
                                         mfa_token=mfa_token,
                                         query_args_l=query_args_l)
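    # Illustrative usage of delete_key() (sketch, not from the original
    # source); the key name and generation are hypothetical:
    #
    #     bucket.delete_key('logs/old.txt', generation=12)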
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None,
generation=None, if_generation=None, if_metageneration=None):
"""Sets or changes a bucket's or key's ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
else:
self.set_canned_acl(acl_or_str, key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
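    # Illustrative usage of set_acl() (sketch, not from the original source);
    # 'public-read' is one of the canned strings in boto.gs.acl.CannedACLStrings:
    #
    #     bucket.set_acl('public-read', key_name='images/logo.png')
    #     acl = bucket.get_acl(key_name='images/logo.png')  # ACL object
    #     bucket.set_acl(acl, key_name='images/logo.png')   # round-trip the object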
def set_def_acl(self, acl_or_str, headers=None):
"""Sets or changes a bucket's default ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers)
else:
self.set_def_canned_acl(acl_or_str, headers=headers)
def _get_xml_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_xml_acl and _get_acl_helper."""
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
if response.status == 403:
match = ERROR_DETAILS_REGEX.search(body)
details = match.group('details') if match else None
if details:
details = (('<Details>%s. Note that Full Control access'
' is required to access ACLs.</Details>') %
details)
body = re.sub(ERROR_DETAILS_REGEX, details, body)
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def _get_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_acl and get_def_acl."""
body = self._get_xml_acl_helper(key_name, headers, query_args)
acl = ACL(self)
h = handler.XmlHandler(acl, self)
xml.sax.parseString(body, h)
return acl
def get_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: :class:`.gs.acl.ACL`
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_acl_helper(key_name, headers, query_args)
def get_xml_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL string of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: str
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_xml_acl_helper(key_name, headers, query_args)
def get_def_acl(self, headers=None):
"""Returns the bucket's default ACL.
:param dict headers: Additional headers to set during the request.
:rtype: :class:`.gs.acl.ACL`
"""
return self._get_acl_helper('', headers, DEF_OBJ_ACL)
def _set_acl_helper(self, acl_or_str, key_name, headers, query_args,
generation, if_generation, if_metageneration,
canned=False):
"""Provides common functionality for set_acl, set_xml_acl,
set_canned_acl, set_def_acl, set_def_xml_acl, and
set_def_canned_acl()."""
headers = headers or {}
data = ''
if canned:
headers[self.connection.provider.acl_header] = acl_or_str
else:
data = acl_or_str
if generation:
query_args += '&generation=%s' % generation
if if_metageneration is not None and if_generation is None:
raise ValueError("Received if_metageneration argument with no "
"if_generation argument. A metageneration has no "
"meaning without a content generation.")
if not key_name and (if_generation or if_metageneration):
raise ValueError("Received if_generation or if_metageneration "
"parameter while setting the ACL of a bucket.")
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if if_metageneration is not None:
headers['x-goog-if-metageneration-match'] = str(if_metageneration)
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), get_utf8_value(key_name),
data=get_utf8_value(data), headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
query_args='acl', generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type query_args: str
:param query_args: The query parameters to pass with the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
return self._set_acl_helper(acl_str, key_name=key_name, headers=headers,
query_args=query_args,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None, generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = STANDARD_ACL
return self._set_acl_helper(acl_str, key_name, headers, query_args,
generation, if_generation,
if_metageneration, canned=True)
def set_def_canned_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = DEF_OBJ_ACL
return self._set_acl_helper(acl_str, '', headers, query_args,
generation=None, if_generation=None,
if_metageneration=None, canned=True)
def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL)
def get_cors(self, headers=None):
"""Returns a bucket's CORS XML document.
:param dict headers: Additional headers to send with the request.
:rtype: :class:`~.cors.Cors`
"""
response = self.connection.make_request('GET', self.name,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status == 200:
# Success - parse XML and return Cors object.
cors = Cors()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors, headers=None):
"""Sets a bucket's CORS XML document.
:param str cors: A string containing the CORS XML.
:param dict headers: Additional headers to send with the request.
"""
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(cors),
query_args=CORS_ARG, headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
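    # Illustrative usage of get_cors()/set_cors() (sketch, not from the
    # original source); to_xml() on the returned Cors object is assumed here:
    #
    #     cors_xml = bucket.get_cors().to_xml()
    #     bucket.set_cors(cors_xml)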
def get_storage_class(self):
"""
Returns the StorageClass for the bucket.
:rtype: str
:return: The StorageClass for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args=STORAGE_CLASS_ARG)
body = response.read()
if response.status == 200:
rs = ResultSet(self)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs.StorageClass
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_storage_class(self, storage_class, headers=None):
"""
Sets a bucket's storage class.
:param str storage_class: A string containing the storage class.
:param dict headers: Additional headers to send with the request.
"""
req_body = self.StorageClassBody % (get_utf8_value(storage_class))
self.set_subresource(STORAGE_CLASS_ARG, req_body, headers=headers)
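    # Illustrative usage of the storage-class accessors (sketch, not from the
    # original source):
    #
    #     if bucket.get_storage_class() != 'DURABLE_REDUCED_AVAILABILITY':
    #         bucket.set_storage_class('DURABLE_REDUCED_AVAILABILITY')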
# Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
# to allow polymorphic treatment at application layer.
def add_email_grant(self, permission, email_address,
recursive=False, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the GS
            account you are granting the permission to.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_email_grant(permission, email_address, headers=headers)
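    # Illustrative usage of add_email_grant() (sketch, not from the original
    # source); the address is hypothetical:
    #
    #     bucket.add_email_grant('READ', 'someone@example.com')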
# Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
# to allow polymorphic treatment at application layer.
def add_user_grant(self, permission, user_id, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add a canonical user
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUTs the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
:type user_id: string
:param user_id: The canonical user id associated with the GS account
you are granting the permission to.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_user_grant(permission, user_id)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers)
def add_group_email_grant(self, permission, email_address, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
        then PUTs the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|WRITE|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_group_email_grant(permission, email_address,
headers=headers)
# Method with same input signature as boto.s3.bucket.Bucket.list_grants()
# (but returning different object type), to allow polymorphic treatment
# at application layer.
def list_grants(self, headers=None):
"""Returns the ACL entries applied to this bucket.
:param dict headers: Additional headers to send with the request.
:rtype: list containing :class:`~.gs.acl.Entry` objects.
"""
acl = self.get_acl(headers=headers)
return acl.entries
def disable_logging(self, headers=None):
"""Disable logging on this bucket.
:param dict headers: Additional headers to send with the request.
"""
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>'
self.set_subresource('logging', xml_str, headers=headers)
def enable_logging(self, target_bucket, target_prefix=None, headers=None):
"""Enable logging on a bucket.
:type target_bucket: bucket or string
:param target_bucket: The bucket to log to.
:type target_prefix: string
:param target_prefix: The prefix which should be prepended to the
generated log files written to the target_bucket.
:param dict headers: Additional headers to send with the request.
"""
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>'
xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket)
if target_prefix:
xml_str = (xml_str +
'<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix)
xml_str = xml_str + '</Logging>'
self.set_subresource('logging', xml_str, headers=headers)
def get_logging_config_with_xml(self, headers=None):
"""Returns the current status of logging configuration on the bucket as
unparsed XML.
:param dict headers: Additional headers to send with the request.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing the parsed XML response from GCS. The
overall structure is:
* Logging
* LogObjectPrefix: Prefix that is prepended to log objects.
* LogBucket: Target bucket for log objects.
2) Unparsed XML describing the bucket's logging configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='logging',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def get_logging_config(self, headers=None):
"""Returns the current status of logging configuration on the bucket.
:param dict headers: Additional headers to send with the request.
:rtype: dict
:returns: A dictionary containing the parsed XML response from GCS. The
overall structure is:
* Logging
* LogObjectPrefix: Prefix that is prepended to log objects.
* LogBucket: Target bucket for log objects.
"""
return self.get_logging_config_with_xml(headers)[0]
def configure_website(self, main_page_suffix=None, error_key=None,
headers=None):
"""Configure this bucket to act as a website
:type main_page_suffix: str
:param main_page_suffix: Suffix that is appended to a request that is
for a "directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/ the data
that is returned will be for the object with the key name
images/index.html). The suffix must not be empty and must not
include a slash character. This parameter is optional and the
property is disabled if excluded.
:type error_key: str
        :param error_key: The object key name to use when a 404 error occurs.
This parameter is optional and the property is disabled if excluded.
:param dict headers: Additional headers to send with the request.
"""
if main_page_suffix:
main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
else:
main_page_frag = ''
if error_key:
error_frag = self.WebsiteErrorFragment % error_key
else:
error_frag = ''
body = self.WebsiteBody % (main_page_frag, error_frag)
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(body),
query_args='websiteConfig', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_configuration(self, headers=None):
"""Returns the current status of website configuration on the bucket.
:param dict headers: Additional headers to send with the request.
:rtype: dict
:returns: A dictionary containing the parsed XML response from GCS. The
overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that
is for a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404.
"""
return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_with_xml(self, headers=None):
"""Returns the current status of website configuration on the bucket as
unparsed XML.
:param dict headers: Additional headers to send with the request.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing the parsed XML response from GCS. The
overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that is for
a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404
2) Unparsed XML describing the bucket's website configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='websiteConfig', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def delete_website_configuration(self, headers=None):
"""Remove the website configuration from this bucket.
:param dict headers: Additional headers to send with the request.
"""
self.configure_website(headers=headers)
def get_versioning_status(self, headers=None):
"""Returns the current status of versioning configuration on the bucket.
:rtype: bool
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
resp_json = boto.jsonresponse.Element()
boto.jsonresponse.XmlHandler(resp_json, None).parse(body)
resp_json = resp_json['VersioningConfiguration']
return ('Status' in resp_json) and (resp_json['Status'] == 'Enabled')
def configure_versioning(self, enabled, headers=None):
"""Configure versioning for this bucket.
:param bool enabled: If set to True, enables versioning on this bucket.
If set to False, disables versioning.
:param dict headers: Additional headers to send with the request.
"""
        if enabled:
            req_body = self.VersioningBody % 'Enabled'
        else:
            req_body = self.VersioningBody % 'Suspended'
self.set_subresource('versioning', req_body, headers=headers)
def get_lifecycle_config(self, headers=None):
"""
Returns the current lifecycle configuration on the bucket.
:rtype: :class:`boto.gs.lifecycle.LifecycleConfig`
:returns: A LifecycleConfig object that describes all current
lifecycle rules in effect for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args=LIFECYCLE_ARG, headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
lifecycle_config = LifecycleConfig()
h = handler.XmlHandler(lifecycle_config, self)
xml.sax.parseString(body, h)
return lifecycle_config
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_lifecycle(self, lifecycle_config, headers=None):
"""
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.gs.lifecycle.LifecycleConfig`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
"""
xml = lifecycle_config.to_xml()
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(xml),
query_args=LIFECYCLE_ARG, headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
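# --- Illustrative usage sketch (not part of boto itself) ---
# A minimal sketch of how the bucket helpers above are typically combined,
# assuming valid Google Cloud Storage credentials are already configured for
# boto; the bucket and object names below are hypothetical.
if __name__ == '__main__':
    import boto

    # connect_gs() picks up credentials from the boto config / environment.
    conn = boto.connect_gs()
    bucket = conn.get_bucket('example-bucket')  # hypothetical bucket name

    # Grant read access to a single account (recursive=True would also
    # touch every key, which can be slow on large buckets).
    bucket.add_email_grant('READ', 'user@example.com', recursive=False)

    # Send access logs to another bucket under a common prefix.
    bucket.enable_logging('example-log-bucket', target_prefix='access-logs/')

    # Serve the bucket as a static website and read the config back.
    bucket.configure_website(main_page_suffix='index.html',
                             error_key='404.html')
    print(bucket.get_website_configuration())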
|
|
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .gas_recursions import gas_recursion_normal_orderone, gas_recursion_normal_ordertwo
from .gas_recursions import gasx_recursion_normal_orderone, gasx_recursion_normal_ordertwo
from .gas_recursions import gas_llev_recursion_normal_orderone, gas_llev_recursion_normal_ordertwo
from .gas_recursions import gas_llt_recursion_normal_orderone, gas_llt_recursion_normal_ordertwo
from .gas_recursions import gas_reg_recursion_normal_orderone, gas_reg_recursion_normal_ordertwo
class Normal(Family):
"""
Normal Distribution
----
This class contains methods relating to the normal distribution for time series.
"""
def __init__(self, mu=0.0, sigma=1.0, transform=None, **kwargs):
"""
Parameters
----------
mu : float
Mean parameter for the Normal distribution
sigma : float
Standard deviation for the Normal distribution
transform : str
Whether to apply a transformation for the location latent variable - e.g. 'exp' or 'logit'
"""
super(Normal, self).__init__(transform)
self.mu0 = mu
self.sigma0 = sigma
self.param_no = 2
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS Normal models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for the Normal measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for the Normal measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family in a probabilistic model
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
lvs_to_build.append(['Normal Scale', Flat(transform='exp'), Normal(0, 3), 0.0])
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from this distribution with new latent variables
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return np.random.normal(loc, scale, nsims)
def draw_variable_local(self, size):
""" Simulate from the Normal distribution using instance values
Parameters
----------
size : int
How many simulations to perform
Returns
----------
np.ndarray of Normal random variable
"""
return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS Normal Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Score of the Normal family
"""
return (y-mean)/np.power(scale,2)
def logpdf(self, mu):
"""
Log PDF for Normal prior
Parameters
----------
mu : float
            Latent variable over which the prior is being formed
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for each likelihood term - used for state space models
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Markov blanket of the Normal family
"""
return ss.norm.logpdf(y, loc=mean, scale=scale)
@staticmethod
def setup():
""" Returns the attributes of this family if using in a probabilistic model
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Normal"
link = np.array
scale = True
shape = False
skewness = False
mean_transform = np.array
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function for this distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Negative loglikelihood of the Normal family
"""
return -np.sum(ss.norm.logpdf(y, loc=mean, scale=scale))
def pdf(self, mu):
"""
PDF for Normal prior
Parameters
----------
mu : float
            Latent variable over which the prior is being formed
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Normal Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Score of the Normal family
"""
return X*(y-mean)
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS Normal Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Adjusted score of the Normal family
"""
return y-mean
def vi_change_param(self, index, value):
""" Wrapper function for changing latent variables - variational inference
Parameters
----------
index : int
0 or 1 depending on which latent variable
value : float
What to change the latent variable to
"""
if index == 0:
self.mu0 = value
elif index == 1:
self.sigma0 = np.exp(value)
def vi_return_param(self, index):
""" Wrapper function for selecting appropriate latent variable for variational inference
Parameters
----------
index : int
0 or 1 depending on which latent variable
Returns
----------
The appropriate indexed parameter
"""
if index == 0:
return self.mu0
elif index == 1:
return np.log(self.sigma0)
def vi_loc_score(self,x):
""" The gradient of the location latent variable mu - used for variational inference
Parameters
----------
x : float
A random variable
Returns
----------
The gradient of the location latent variable mu at x
"""
return (x-self.mu0)/(self.sigma0**2)
def vi_scale_score(self,x):
""" The score of the scale, where scale = exp(x) - used for variational inference
Parameters
----------
x : float
A random variable
Returns
----------
The gradient of the scale latent variable at x
"""
return np.exp(-2.0*np.log(self.sigma0))*(x-self.mu0)**2 - 1.0
def vi_score(self, x, index):
""" Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the scale latent variable at x
"""
if index == 0:
return self.vi_loc_score(x)
elif index == 1:
return self.vi_scale_score(x)
# Optional Cythonized recursions below for GAS Normal models
@staticmethod
def gradient_recursion():
""" GAS Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Normal model - gradient only
"""
return gas_recursion_normal_orderone
@staticmethod
def newton_recursion():
""" GAS Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Normal model - adjusted score
"""
return gas_recursion_normal_ordertwo
@staticmethod
def gradientx_recursion():
""" GASX Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GASX Normal model - gradient only
"""
return gasx_recursion_normal_orderone
@staticmethod
def newtonx_recursion():
""" GASX Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GASX Normal model - adjusted score
"""
return gasx_recursion_normal_ordertwo
@staticmethod
def gradientllev_recursion():
""" GAS Local Level Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Level Normal model - gradient only
"""
return gas_llev_recursion_normal_orderone
@staticmethod
def newtonllev_recursion():
""" GAS Local Level Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Level Normal model - adjusted score
"""
return gas_llev_recursion_normal_ordertwo
@staticmethod
def gradientllt_recursion():
""" GAS Local Linear Trend Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Linear Trend Normal model - gradient only
"""
return gas_llt_recursion_normal_orderone
@staticmethod
def newtonllt_recursion():
""" GAS Local Linear Trend Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Linear Trend Normal model - adjusted score
"""
return gas_llt_recursion_normal_ordertwo
@staticmethod
def gradientreg_recursion():
""" GAS Dynamic Regression Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Dynamic Regression Normal model - gradient only
"""
return gas_reg_recursion_normal_orderone
@staticmethod
def newtonreg_recursion():
""" GAS Dynamic Regression Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Dynamic Regression Normal model - adjusted score
"""
return gas_reg_recursion_normal_ordertwo
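# --- Illustrative sanity-check sketch (not part of PyFlux itself) ---
# A minimal demonstration of how the two GAS update terms above relate for the
# Normal family: the adjusted (second-order) score equals the plain gradient
# score scaled by the variance, i.e. second_order_score = first_order_score *
# scale**2 = y - mean. The numbers used below are arbitrary.
def _demo_normal_scores():
    import numpy as np
    import scipy.stats as ss

    y, mean, scale = 1.3, 0.5, 2.0
    grad = Normal.first_order_score(y, mean, scale, None, None)
    adj = Normal.second_order_score(y, mean, scale, None, None)
    assert np.isclose(adj, grad * scale ** 2)

    # markov_blanket() is just the pointwise Normal log-likelihood.
    assert np.isclose(Normal.markov_blanket(y, mean, scale, None, None),
                      ss.norm.logpdf(y, loc=mean, scale=scale))
    return grad, adj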
|
|
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase'
)
import base64
import os.path
import urllib
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, Exam
from nydus.db import create_cluster
from rest_framework.test import APITestCase as BaseAPITestCase
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, OrganizationMemberType, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner
def create_redis_conn():
options = {
'engine': 'nydus.db.backends.redis.Redis',
}
options.update(settings.SENTRY_REDIS_OPTIONS)
return create_cluster(options)
_redis_conn = create_redis_conn()
def flush_redis():
_redis_conn.flushdb()
class BaseTestCase(Fixtures, Exam):
urls = 'tests.sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def tasks(self):
return TaskRunner()
def feature(self, name, active=True):
"""
        >>> with self.feature('feature:name'):
        ...     # ...
"""
return Feature(name, active)
def auth_provider(self, name, cls):
"""
        >>> with self.auth_provider('name', Provider):
        ...     # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
flush_redis()
def _makeMessage(self, data):
return json.dumps(data)
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.tasks():
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
)
return resp
def _getWithReferer(self, data, key=None, referer='getsentry.com', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.tasks():
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urllib.urlencode(qs)),
**headers
)
return resp
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
kwargs.setdefault('rule_is_active', False)
kwargs.setdefault('rule_last_active', None)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_team_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_org_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_can_access(user, path)
def assert_teamless_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_can_access(user, path)
def assert_team_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_cannot_access(user, path)
def assert_org_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_cannot_access(user, path)
def assert_teamless_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_org_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_teamless_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_org_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_org_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_teamless_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_team_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_org_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_teamless_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_non_member_cannot_access(self, path):
user = self.create_user()
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
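# --- Illustrative usage sketch (not part of Sentry itself) ---
# A minimal sketch of how RuleTestCase is meant to be subclassed: point
# `rule_cls` at a rule class and exercise it with assertPasses /
# assertDoesNotPass under different EventState flags. The rule below is a
# hypothetical stand-in exposing the same `passes(event, state)` interface the
# helpers above rely on, not a real Sentry rule.
class _NewEventOnlyRule(object):
    """Hypothetical rule that fires only for events marked as new."""
    def __init__(self, project=None, data=None):
        self.project = project
        self.data = data or {}

    def passes(self, event, state):
        return state.is_new


class _ExampleNewEventRuleTest(RuleTestCase):
    rule_cls = _NewEventOnlyRule

    def test_new_event_passes(self):
        rule = self.get_rule()
        self.assertPasses(rule, self.event, is_new=True)
        self.assertDoesNotPass(rule, self.event, is_new=False)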
|
|
import concurrent.futures
import configparser
import logging
import multiprocessing
import os
import shutil
import signal
import sys
import tabpy
from tabpy.tabpy import __version__
from tabpy.tabpy_server.app.app_parameters import ConfigParameters, SettingsParameters
from tabpy.tabpy_server.app.util import parse_pwd_file
from tabpy.tabpy_server.management.state import TabPyState
from tabpy.tabpy_server.management.util import _get_state_from_file
from tabpy.tabpy_server.psws.callbacks import init_model_evaluator, init_ps_server
from tabpy.tabpy_server.psws.python_service import PythonService, PythonServiceHandler
from tabpy.tabpy_server.handlers import (
EndpointHandler,
EndpointsHandler,
EvaluationPlaneHandler,
EvaluationPlaneDisabledHandler,
QueryPlaneHandler,
ServiceInfoHandler,
StatusHandler,
UploadDestinationHandler,
)
import tornado
logger = logging.getLogger(__name__)
def _init_asyncio_patch():
"""
Select compatible event loop for Tornado 5+.
As of Python 3.8, the default event loop on Windows is `proactor`,
however Tornado requires the old default "selector" event loop.
    As Tornado has decided to leave this to users to set, TabPy needs
    to set it. See https://github.com/tornadoweb/tornado/issues/2608.
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import WindowsSelectorEventLoopPolicy
except ImportError:
pass # Can't assign a policy which doesn't exist.
else:
if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
class TabPyApp:
"""
TabPy application class for keeping context like settings, state, etc.
"""
settings = {}
subdirectory = ""
tabpy_state = None
python_service = None
credentials = {}
def __init__(self, config_file):
if config_file is None:
config_file = os.path.join(
os.path.dirname(__file__), os.path.pardir, "common", "default.conf"
)
if os.path.isfile(config_file):
try:
from logging import config
config.fileConfig(config_file, disable_existing_loggers=False)
except KeyError:
logging.basicConfig(level=logging.DEBUG)
self._parse_config(config_file)
def run(self):
application = self._create_tornado_web_app()
max_request_size = (
int(self.settings[SettingsParameters.MaxRequestSizeInMb]) * 1024 * 1024
)
logger.info(f"Setting max request size to {max_request_size} bytes")
init_model_evaluator(self.settings, self.tabpy_state, self.python_service)
protocol = self.settings[SettingsParameters.TransferProtocol]
ssl_options = None
if protocol == "https":
ssl_options = {
"certfile": self.settings[SettingsParameters.CertificateFile],
"keyfile": self.settings[SettingsParameters.KeyFile],
}
elif protocol != "http":
msg = f"Unsupported transfer protocol {protocol}."
logger.critical(msg)
raise RuntimeError(msg)
application.listen(
self.settings[SettingsParameters.Port],
ssl_options=ssl_options,
max_buffer_size=max_request_size,
max_body_size=max_request_size,
)
logger.info(
"Web service listening on port "
f"{str(self.settings[SettingsParameters.Port])}"
)
tornado.ioloop.IOLoop.instance().start()
def _create_tornado_web_app(self):
class TabPyTornadoApp(tornado.web.Application):
is_closing = False
def signal_handler(self, signal, _):
logger.critical(f"Exiting on signal {signal}...")
self.is_closing = True
def try_exit(self):
if self.is_closing:
tornado.ioloop.IOLoop.instance().stop()
logger.info("Shutting down TabPy...")
logger.info("Initializing TabPy...")
tornado.ioloop.IOLoop.instance().run_sync(
lambda: init_ps_server(self.settings, self.tabpy_state)
)
logger.info("Done initializing TabPy.")
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=multiprocessing.cpu_count()
)
# initialize Tornado application
_init_asyncio_patch()
application = TabPyTornadoApp(
[
(
self.subdirectory + r"/query/([^/]+)",
QueryPlaneHandler,
dict(app=self),
),
(self.subdirectory + r"/status", StatusHandler, dict(app=self)),
(self.subdirectory + r"/info", ServiceInfoHandler, dict(app=self)),
(self.subdirectory + r"/endpoints", EndpointsHandler, dict(app=self)),
(
self.subdirectory + r"/endpoints/([^/]+)?",
EndpointHandler,
dict(app=self),
),
(
self.subdirectory + r"/evaluate",
EvaluationPlaneHandler if self.settings[SettingsParameters.EvaluateEnabled]
else EvaluationPlaneDisabledHandler,
dict(executor=executor, app=self),
),
(
self.subdirectory + r"/configurations/endpoint_upload_destination",
UploadDestinationHandler,
dict(app=self),
),
(
self.subdirectory + r"/(.*)",
tornado.web.StaticFileHandler,
dict(
path=self.settings[SettingsParameters.StaticPath],
default_filename="index.html",
),
),
],
debug=False,
**self.settings,
)
signal.signal(signal.SIGINT, application.signal_handler)
tornado.ioloop.PeriodicCallback(application.try_exit, 500).start()
return application
def _set_parameter(self, parser, settings_key, config_key, default_val, parse_function):
key_is_set = False
if (
config_key is not None
and parser.has_section("TabPy")
and parser.has_option("TabPy", config_key)
):
if parse_function is None:
parse_function = parser.get
self.settings[settings_key] = parse_function("TabPy", config_key)
key_is_set = True
logger.debug(
f"Parameter {settings_key} set to "
f'"{self.settings[settings_key]}" '
"from config file or environment variable"
)
if not key_is_set and default_val is not None:
self.settings[settings_key] = default_val
key_is_set = True
logger.debug(
f"Parameter {settings_key} set to "
f'"{self.settings[settings_key]}" '
"from default value"
)
if not key_is_set:
logger.debug(f"Parameter {settings_key} is not set")
def _parse_config(self, config_file):
"""Provide consistent mechanism for pulling in configuration.
Attempt to retain backward compatibility for
existing implementations by grabbing port
setting from CLI first.
Take settings in the following order:
1. CLI arguments if present
2. config file
3. OS environment variables (for ease of
setting defaults if not present)
4. current defaults if a setting is not present in any location
Additionally provide similar configuration capabilities in between
config file and environment variables.
For consistency use the same variable name in the config file as
in the os environment.
For naming standards use all capitals and start with 'TABPY_'
"""
self.settings = {}
self.subdirectory = ""
self.tabpy_state = None
self.python_service = None
self.credentials = {}
pkg_path = os.path.dirname(tabpy.__file__)
parser = configparser.ConfigParser(os.environ)
logger.info(f"Parsing config file {config_file}")
file_exists = False
if os.path.isfile(config_file):
try:
with open(config_file, 'r') as f:
parser.read_string(f.read())
file_exists = True
except Exception:
pass
if not file_exists:
logger.warning(
f"Unable to open config file {config_file}, "
"using default settings."
)
settings_parameters = [
(SettingsParameters.Port, ConfigParameters.TABPY_PORT, 9004, None),
(SettingsParameters.ServerVersion, None, __version__, None),
(SettingsParameters.EvaluateEnabled, ConfigParameters.TABPY_EVALUATE_ENABLE,
True, parser.getboolean),
(SettingsParameters.EvaluateTimeout, ConfigParameters.TABPY_EVALUATE_TIMEOUT,
30, parser.getfloat),
(SettingsParameters.UploadDir, ConfigParameters.TABPY_QUERY_OBJECT_PATH,
os.path.join(pkg_path, "tmp", "query_objects"), None),
(SettingsParameters.TransferProtocol, ConfigParameters.TABPY_TRANSFER_PROTOCOL,
"http", None),
(SettingsParameters.CertificateFile, ConfigParameters.TABPY_CERTIFICATE_FILE,
None, None),
(SettingsParameters.KeyFile, ConfigParameters.TABPY_KEY_FILE, None, None),
(SettingsParameters.StateFilePath, ConfigParameters.TABPY_STATE_PATH,
os.path.join(pkg_path, "tabpy_server"), None),
(SettingsParameters.StaticPath, ConfigParameters.TABPY_STATIC_PATH,
os.path.join(pkg_path, "tabpy_server", "static"), None),
(ConfigParameters.TABPY_PWD_FILE, ConfigParameters.TABPY_PWD_FILE, None, None),
(SettingsParameters.LogRequestContext, ConfigParameters.TABPY_LOG_DETAILS,
"false", None),
(SettingsParameters.MaxRequestSizeInMb, ConfigParameters.TABPY_MAX_REQUEST_SIZE_MB,
100, None),
]
for setting, parameter, default_val, parse_function in settings_parameters:
self._set_parameter(parser, setting, parameter, default_val, parse_function)
if not os.path.exists(self.settings[SettingsParameters.UploadDir]):
os.makedirs(self.settings[SettingsParameters.UploadDir])
# set and validate transfer protocol
self.settings[SettingsParameters.TransferProtocol] = self.settings[
SettingsParameters.TransferProtocol
].lower()
self._validate_transfer_protocol_settings()
# if state.ini does not exist try and create it - remove
# last dependence on batch/shell script
self.settings[SettingsParameters.StateFilePath] = os.path.realpath(
os.path.normpath(
os.path.expanduser(self.settings[SettingsParameters.StateFilePath])
)
)
state_config, self.tabpy_state = self._build_tabpy_state()
self.python_service = PythonServiceHandler(PythonService())
self.settings["compress_response"] = True
self.settings[SettingsParameters.StaticPath] = os.path.abspath(
self.settings[SettingsParameters.StaticPath]
)
logger.debug(
f"Static pages folder set to "
f'"{self.settings[SettingsParameters.StaticPath]}"'
)
# Set subdirectory from config if applicable
if state_config.has_option("Service Info", "Subdirectory"):
self.subdirectory = "/" + state_config.get("Service Info", "Subdirectory")
# If passwords file specified load credentials
if ConfigParameters.TABPY_PWD_FILE in self.settings:
if not self._parse_pwd_file():
msg = (
"Failed to read passwords file "
f"{self.settings[ConfigParameters.TABPY_PWD_FILE]}"
)
logger.critical(msg)
raise RuntimeError(msg)
else:
logger.info(
"Password file is not specified: " "Authentication is not enabled"
)
features = self._get_features()
self.settings[SettingsParameters.ApiVersions] = {"v1": {"features": features}}
self.settings[SettingsParameters.LogRequestContext] = (
self.settings[SettingsParameters.LogRequestContext].lower() != "false"
)
call_context_state = (
"enabled"
if self.settings[SettingsParameters.LogRequestContext]
else "disabled"
)
logger.info(f"Call context logging is {call_context_state}")
def _validate_transfer_protocol_settings(self):
if SettingsParameters.TransferProtocol not in self.settings:
msg = "Missing transfer protocol information."
logger.critical(msg)
raise RuntimeError(msg)
protocol = self.settings[SettingsParameters.TransferProtocol]
if protocol == "http":
return
if protocol != "https":
msg = f"Unsupported transfer protocol: {protocol}"
logger.critical(msg)
raise RuntimeError(msg)
self._validate_cert_key_state(
"The parameter(s) {} must be set.",
SettingsParameters.CertificateFile in self.settings,
SettingsParameters.KeyFile in self.settings,
)
cert = self.settings[SettingsParameters.CertificateFile]
self._validate_cert_key_state(
"The parameter(s) {} must point to " "an existing file.",
os.path.isfile(cert),
os.path.isfile(self.settings[SettingsParameters.KeyFile]),
)
tabpy.tabpy_server.app.util.validate_cert(cert)
@staticmethod
def _validate_cert_key_state(msg, cert_valid, key_valid):
cert_and_key_param = (
f"{ConfigParameters.TABPY_CERTIFICATE_FILE} and "
f"{ConfigParameters.TABPY_KEY_FILE}"
)
https_error = "Error using HTTPS: "
err = None
if not cert_valid and not key_valid:
err = https_error + msg.format(cert_and_key_param)
elif not cert_valid:
err = https_error + msg.format(ConfigParameters.TABPY_CERTIFICATE_FILE)
elif not key_valid:
err = https_error + msg.format(ConfigParameters.TABPY_KEY_FILE)
if err is not None:
logger.critical(err)
raise RuntimeError(err)
def _parse_pwd_file(self):
succeeded, self.credentials = parse_pwd_file(
self.settings[ConfigParameters.TABPY_PWD_FILE]
)
if succeeded and len(self.credentials) == 0:
logger.error("No credentials found")
succeeded = False
return succeeded
def _get_features(self):
features = {}
# Check for auth
if ConfigParameters.TABPY_PWD_FILE in self.settings:
features["authentication"] = {
"required": True,
"methods": {"basic-auth": {}},
}
features["evaluate_enabled"] = self.settings[SettingsParameters.EvaluateEnabled]
return features
def _build_tabpy_state(self):
pkg_path = os.path.dirname(tabpy.__file__)
state_file_dir = self.settings[SettingsParameters.StateFilePath]
state_file_path = os.path.join(state_file_dir, "state.ini")
if not os.path.isfile(state_file_path):
state_file_template_path = os.path.join(
pkg_path, "tabpy_server", "state.ini.template"
)
logger.debug(
f"File {state_file_path} not found, creating from "
f"template {state_file_template_path}..."
)
shutil.copy(state_file_template_path, state_file_path)
logger.info(f"Loading state from state file {state_file_path}")
tabpy_state = _get_state_from_file(state_file_dir)
return tabpy_state, TabPyState(config=tabpy_state, settings=self.settings)
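# --- Illustrative configuration sketch (not part of TabPy itself) ---
# A minimal example of the config file that _parse_config() consumes: values
# live in a [TabPy] section and use the same names as the corresponding
# environment variables (all caps, TABPY_ prefix). The port and certificate
# paths below are hypothetical. A config file path is passed straight to the
# constructor, e.g.:
#
#     app = TabPyApp("/path/to/my_tabpy.conf")
#     app.run()
#
_EXAMPLE_CONFIG = """
[TabPy]
TABPY_PORT = 9004
TABPY_TRANSFER_PROTOCOL = https
TABPY_CERTIFICATE_FILE = /etc/tabpy/server.crt
TABPY_KEY_FILE = /etc/tabpy/server.key
TABPY_EVALUATE_ENABLE = true
"""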
|
|
"""
Utilities and glue code for providing a command-line interface to
module functions.
"""
import sys
import argparse
def add_cmdline_args(group_name, parser):
"""
All module-specific command-line arguments are specified here, split
into groups by functionality.
Any command-line interfacing function can include any subset of
these groups.
"""
if group_name == 'common':
parser.add_argument(
'--force',
action="store_true",
default=False)
parser.add_argument(
'--random_seed',
type=int,
default=42)
# Stuff for selecting the dataset and limiting the number of images:
elif group_name == 'dataset':
parser.add_argument(
'--dataset',
help="select which dataset to use",
required=True)
parser.add_argument(
'--source_dataset',
help="select which dataset to use as trained clf source",
type=str,
default=None)
parser.add_argument(
'--num_images',
help="number of images to use from the dataset (-1 for all)",
type=int,
default=-1)
parser.add_argument(
'--force_dataset',
help="Force reloading of dataset from scratch",
action="store_true",
default=False)
# Stuff for selecting list of features and forcing their overwrite.
elif group_name == 'feature':
parser.add_argument(
'--features',
help="Comma-separated list of FEATURES from vislab/feature.py",
default='size')
parser.add_argument(
'--standardize',
help="Standardize features during caching?",
action="store_true",
default=False)
parser.add_argument(
'--force_features',
help="force overwrite of existing features",
action="store_true",
default=False)
# Stuff for distributed computation:
elif group_name == 'processing':
parser.add_argument(
'--num_workers',
help="number of workers to use in processing jobs",
type=int,
default=1)
parser.add_argument(
'--chunk_size',
help="number of jobs to assign to a worker at once",
type=int,
default=20)
parser.add_argument(
'--mem',
help="amount of memory that a single worker will use",
type=int,
default=3000)
parser.add_argument(
'--cpus_per_task',
help="number of cpus that a single worker will use",
type=int,
default=4)
# Forming a prediction dataset and setting properties of predictor.
elif group_name == 'prediction':
parser.add_argument(
'--prediction_label',
required=True,
help="""
column of the dataframe to use for label.
can contain wildcard characters,
i.e. 'style_*' will match multiple columns""")
parser.add_argument(
'--collection_name',
help="name of the collection to write prediction results to",
default="default")
parser.add_argument(
'--test_frac',
help="fraction of dataset to use for testing",
type=float,
default=0.2)
parser.add_argument(
'--balanced',
help="should the validation set be balanced for multiclass",
action="store_true",
default=False)
parser.add_argument(
'--min_pos_frac',
help="minimum fraction of positive examples in training",
type=float,
default=0.1)
parser.add_argument(
'--quadratic',
help="perform quadratic expansion of the features",
type=str,
default=None)
parser.add_argument(
'--bit_precision',
help="bit precision of the VW classifier",
type=int,
default=18)
parser.add_argument(
'--force_predict',
help="force overwrite of existing results",
action="store_true",
default=False)
parser.add_argument(
'--ava_num_train',
help="number of training images to use",
type=int,
default=-1)
parser.add_argument(
'--ava_delta',
help="AVA: only use images >= delta from mean rating",
type=float,
default=0.0)
else:
raise Exception("Unknown group!")
def get_args(script_name, calling_function_name, groups=None):
"""
Parse and return all options and arguments.
Parameters
----------
calling_function_name: string
groups: list
List of groups of options to include.
Returns
-------
args: argparse.Namespace
"""
usage = "python {} {} [options]".format(
script_name, calling_function_name)
    parser = argparse.ArgumentParser(usage=usage)
all_groups = [
'common', 'processing', 'dataset', 'feature', 'prediction'
]
if groups is None:
groups = all_groups
# Common arguments are always part of the argument parser.
if 'common' not in groups:
groups.append('common')
for group in groups:
add_cmdline_args(group, parser)
# Get the parsed options and arguments, keeping in mind that the
# first argument is the name of the calling function.
parser.add_argument('function', nargs=1)
args = parser.parse_args()
# Split features into a list.
if 'features' in args:
args.features = args.features.split(',')
return args
def run_function_in_file(name, possible_functions):
"""
Provide a command line interface to call a function in a file.
Simply call the function named as the first commandline argument.
Parameters
----------
name: string
Name of the file that is calling this method.
What the user executed.
possible_functions: dict
name: function
"""
def print_usage():
print("usage:\tpython {} <function> [args] <args>".format(name))
print("possible functions:")
for func in possible_functions.keys():
print("\t" + func)
sys.exit(1)
if len(sys.argv) < 2:
print_usage()
selected_function = sys.argv[1]
if selected_function in possible_functions:
possible_functions[selected_function]()
else:
print("Unknown function: {}".format(selected_function))
print_usage()
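# --- Illustrative usage sketch (not part of the functions above) ---
# A minimal, runnable demonstration of the dispatch pattern described in
# run_function_in_file(): a script exposes named functions, each of which
# pulls its own option groups via get_args(). The '_demo' function name and
# the choice of the 'dataset' group are arbitrary.
if __name__ == '__main__':
    def _demo():
        # e.g.  python cmdline.py _demo --dataset ava --num_images 100
        args = get_args(sys.argv[0], '_demo', groups=['dataset'])
        print("dataset: {}, num_images: {}".format(
            args.dataset, args.num_images))

    run_function_in_file(sys.argv[0], {'_demo': _demo})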
|
|
from __future__ import unicode_literals, print_function
import pytest
from oar.lib import db
from oar.kao.job import insert_job
from oar.kao.meta_sched import meta_schedule
import oar.lib.tools # for monkeypatching
from oar.lib.tools import get_date
@pytest.yield_fixture(scope='function', autouse=True)
def minimal_db_initialization(request):
with db.session(ephemeral=True):
db['Queue'].create(name='default', priority=3, scheduler_policy='kamelot', state='Active')
# add some resources
for i in range(5):
db['Resource'].create(network_address="localhost" + str(int(i / 2)))
yield
@pytest.fixture(scope='function', autouse=True)
def monkeypatch_tools(request, monkeypatch):
monkeypatch.setattr(oar.lib.tools, 'init_judas_notify_user', lambda: None)
monkeypatch.setattr(oar.lib.tools, 'create_almighty_socket', lambda: None)
monkeypatch.setattr(oar.lib.tools, 'notify_almighty', lambda x: len(x))
monkeypatch.setattr(oar.lib.tools, 'notify_tcp_socket', lambda addr, port, msg: len(msg))
monkeypatch.setattr(oar.lib.tools, 'notify_user', lambda job, state, msg: len(state + msg))
def _test_db_timesharing_1(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["timesharing=*,*"])
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["timesharing=*,*"])
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] == res[1]
def test_db_timesharing_2(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=4', "")])], properties="",
types=["timesharing=user,*"], user='toto')
insert_job(res=[(60, [('resource_id=4', "")])], properties="",
types=["timesharing=user,*"], user='titi')
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] != res[1]
def test_db_timesharing_3(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["timesharing=*,*"])
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["timesharing=*,*"])
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.id, j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] == res[1]
def test_db_properties_1(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=2', "")])], properties="network_address='localhost1'")
insert_job(res=[(60, [('resource_id=2', "")])], properties="network_address='localhost1'")
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] != res[1]
def test_db_properties_2(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=2', "network_address='localhost1'")])], properties="")
insert_job(res=[(60, [('resource_id=2', "network_address='localhost1'")])], properties="")
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] != res[1]
def test_db_properties_3(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=2', "network_address='localhost0'")])],
properties="network_address='localhost0'")
insert_job(res=[(60, [('resource_id=2', "network_address='localhost1'")])],
properties="network_address='localhost1'")
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] == res[1]
def _test_db_placeholder_1(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["placeholder=yop"])
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["allow=yop"])
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] == res[1]
def test_db_placeholder_2(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["placeholder=yop"])
insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["allow=poy"])
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] != res[1]
def test_db_moldable_1(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=3', "")])], properties="")
insert_job(res=[(60, [('resource_id=4', "")]), (70, [('resource_id=3', "")])], properties="")
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] != res[1]
def test_db_moldable_2(monkeypatch):
now = get_date()
insert_job(res=[(60, [('resource_id=3', "")])], properties="")
insert_job(res=[(60, [('resource_id=4', "")]), (70, [('resource_id=2', "")])], properties="")
meta_schedule('internal')
for j in db['Job'].query.all():
print(j.state)
res = []
for i in db['GanttJobsPrediction'].query.all():
print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
res.append(i.start_time - now)
assert res[0] == res[1]
def test_db_suspended_duration_1(monkeypatch):
insert_job(res=[(60, [('resource_id=3', "")])], properties="", suspended='YES')
meta_schedule('internal')
job = db['Job'].query.one()
assert (job.state == 'toLaunch')
# set_job_state(job.id, 'Running')
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:10011")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:10011")
cmd = sys.argv[1].lower()
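# Each branch below prompts for its arguments; optional arguments use a nested
# try/except so the RPC call is retried without them if the call that includes
# blank values fails.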
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def reset_eager(fn):
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
# Reset the context.
context._context = None
ops.enable_eager_execution_internal()
assert context._context is not None
return wrapper
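# The tests below mutate process-global eager context state, so each one is
# wrapped with reset_eager to tear the context down and re-enable eager
# execution after it finishes.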
class ConfigTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
context.ensure_initialized()
def copy_tensor(dtype=dtypes.int32):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(
config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(
config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
def mod():
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
return math_ops.mod(a, b)
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the mod operation should fallback to CPU
# with pure eager execution as well as functions
mod()
def_function.function(mod)()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the mod operation should fail on GPU
# with pure eager execution as well as functions
with self.assertRaises(errors.InvalidArgumentError):
mod()
with self.assertRaises(errors.InvalidArgumentError):
def_function.function(mod)()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
# Changing the device placement should not throw an exception
context.set_log_device_placement(True)
@reset_eager
def testEnableMlirBridge(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
# Tests enabling mlir bridge.
config.enable_mlir_bridge()
self.assertTrue(context.context().config.experimental.enable_mlir_bridge)
# Tests disabling mlir bridge.
config.disable_mlir_bridge()
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
@reset_eager
def testEnableMlirGraphOptimization(self):
# Default value of enable_mlir_graph_optimization is false.
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests enabling mlir graph optimization.
config.enable_mlir_graph_optimization()
self.assertTrue(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests disabling mlir graph optimization.
config.disable_mlir_graph_optimization()
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), False)
# the following function should cause Op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this. We will just have to
# settle for creating a test that can trigger JIT.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit(True)
self.assertEqual(config.get_optimizer_jit(), True)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
config.set_optimizer_jit(False)
self.assertEqual(config.get_optimizer_jit(), False)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'),
('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
class DeviceTest(test.TestCase):
@reset_eager
def testPhysicalDevices(self):
cpus = config.list_physical_devices('CPU')
self.assertGreater(len(cpus), 0)
if test_util.is_gpu_available():
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
@reset_eager
def testCpuMultiple(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
vcpus = config.list_logical_devices('CPU')
self.assertEqual(len(vcpus), 2)
with ops.device('/device:CPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
with ops.device('/device:CPU:1'):
b = constant_op.constant(1.0)
self.evaluate(b)
with ops.device('/device:CPU:2'):
c = constant_op.constant(1.0)
self.evaluate(c)
self.assertIn('CPU:0', c.device)
# Ensure we can place ops on each of the device names
for vcpu in vcpus:
with ops.device(vcpu.name):
d = constant_op.constant(1.0)
self.evaluate(d)
# Modifying the CPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
# Setting the same CPU configuration is fine
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
@test_util.run_gpu_only
@reset_eager
def testGpuNone(self):
config.set_soft_device_placement(False)
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertGreater(len(config.get_visible_devices('GPU')), 0)
self.assertEqual(len(config.get_visible_devices('XLA_GPU')), 0)
config.set_visible_devices(cpus[0])
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertEqual(len(config.get_visible_devices('GPU')), 0)
self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Could not satisfy'):
with ops.device('/device:GPU:0'):
a = array_ops.identity(1.0)
self.evaluate(a)
# Modifying the visible devices is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_visible_devices(gpus)
# Setting the same visible devices is fine
config.set_visible_devices(cpus[0])
@reset_eager
def testGpuMultiple(self):
gpus = config.list_physical_devices('GPU')
if len(gpus) < 2:
self.skipTest('Need at least 2 GPUs')
context.ensure_initialized()
for i in range(0, len(gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegex(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
@reset_eager
def testDeviceDetails(self):
(cpu,) = config.list_physical_devices('CPU')
details = config.get_device_details(cpu)
self.assertEqual(details, {})
if not test_util.is_gpu_available():
return
gpus = config.list_physical_devices('GPU')
details = config.get_device_details(gpus[0])
self.assertIsInstance(details['device_name'], str)
self.assertNotEmpty(details['device_name'])
if test.is_built_with_rocm():
# AMD GPUs do not have a compute capability
self.assertNotIn('compute_capability', details)
else:
cc = details['compute_capability']
self.assertIsInstance(cc, tuple)
major, minor = cc
self.assertGreater(major, 0)
self.assertGreaterEqual(minor, 0)
# Test GPU returned from get_visible_devices
if len(gpus) > 2:
config.set_visible_devices(gpus[1], 'GPU')
(visible_gpu,) = config.get_visible_devices('GPU')
details = config.get_device_details(visible_gpu)
self.assertIsInstance(details['device_name'], str)
@reset_eager
def testDeviceDetailsErrors(self):
logical_devices = config.list_logical_devices()
with self.assertRaisesRegex(ValueError,
'must be a tf.config.PhysicalDevice'):
config.get_device_details(logical_devices[0])
phys_dev = context.PhysicalDevice('/physical_device:CPU:100', 'CPU')
with self.assertRaisesRegex(
ValueError, 'The PhysicalDevice must be one obtained from '
'calling `tf.config.list_physical_devices`'):
config.get_device_details(phys_dev)
@test_util.run_gpu_only
@reset_eager
def testVirtualGpu(self):
config.set_soft_device_placement(False)
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
self.assertEqual(len(config.get_logical_device_configuration(gpus[-1])), 2)
logical_gpus = config.list_logical_devices('GPU')
    self.assertEqual(len(logical_gpus), len(gpus) + 1)
for i in range(0, len(logical_gpus)):
with ops.device('/device:GPU:' + str(i)):
a = array_ops.identity(1.0)
self.evaluate(a)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Could not satisfy'):
with ops.device('/device:GPU:' + str(len(logical_gpus))):
a = array_ops.identity(1.0)
self.evaluate(a)
# Modifying the GPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=20),
context.LogicalDeviceConfiguration(memory_limit=20)
])
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
# Setting the same GPU configuration is fine
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
@test_util.run_gpu_only
@reset_eager
def testGpuGrowth(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_memory_growth(gpus[-1]))
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
logical_gpus = config.list_logical_devices('GPU')
    self.assertEqual(len(logical_gpus), len(gpus))
# Modifying the GPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
for gpu in gpus:
config.set_memory_growth(gpu, False)
# Setting the same GPU configuration is fine
for gpu in gpus:
config.set_memory_growth(gpu, True)
@test_util.run_gpu_only
@reset_eager
def testGetMemoryUsage(self):
device = array_ops.zeros([]).backing_device
self.assertGreater(config.get_memory_usage(device), 0)
@test_util.run_gpu_only
@reset_eager
def testGetMemoryUsageSubstring(self):
self.assertGreater(config.get_memory_usage('GPU:0'), 0)
@reset_eager
def testGetMemoryUsageCPU(self):
with self.assertRaisesRegex(ValueError, 'CPU does not support'):
config.get_memory_usage('CPU:0')
@reset_eager
def testGetMemoryUsageUnknownDevice(self):
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
config.get_memory_usage('unknown_device')
@test_util.run_gpu_only
@reset_eager
def testGetMemoryUsageAmbiguousDevice(self):
if len(config.list_physical_devices('GPU')) < 2:
self.skipTest('Need at least 2 GPUs')
with self.assertRaisesRegex(ValueError, 'Multiple devices'):
config.get_memory_usage('GPU')
@test_util.run_gpu_only
@reset_eager
def testGpuInvalidConfig(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
if len(gpus) > 1:
# Assert if other GPUs were not configured
config.set_memory_growth(gpus[0], True)
with self.assertRaisesRegex(ValueError, 'cannot differ'):
c = context.context().config
# If we limit visibility to GPU 0, growth is fine
config.set_visible_devices(gpus[0], 'GPU')
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
# Default setting for second GPU is False and works if we set visibility
config.set_visible_devices(gpus[1], 'GPU')
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
# Growth now fails because all the GPUs are visible and not the same
config.set_visible_devices(gpus, 'GPU')
with self.assertRaisesRegex(ValueError, 'cannot differ'):
c = context.context().config
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
with self.assertRaisesRegex(ValueError, 'memory limit'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
with self.assertRaisesRegex(ValueError, 'virtual devices'):
config.set_memory_growth(gpus[-1], False)
@test_util.run_gpu_only
@reset_eager
def testRemote(self):
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
context.ensure_initialized()
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
for gpu in gpus:
self.assertIsNotNone(gpu.name)
context.ensure_initialized()
job_name = 'test'
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = 'localhost:0'
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name=job_name, task_index=0, protocol='grpc')
context.set_server_def(server_def)
gpus = config.list_logical_devices('GPU')
for gpu in gpus:
self.assertIsNotNone(gpu.name)
@reset_eager
def testV1CompatibilityDummyInvisibleDeviceList(self):
gpus = config.list_physical_devices('GPU')
if gpus:
self.skipTest('Test requires no GPUs')
# Ensure GPU options left untouched on CPU only environments
context.context()._physical_devices = None
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list, '0')
@test_util.run_gpu_only
@reset_eager
def testV1Compatibility(self):
# Ensure we set 1 CPU by default
context.context()._config = config_pb2.ConfigProto()
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 1)
context.context()._physical_devices = None
# Ensure CPU is split
context.context()._config = config_pb2.ConfigProto(device_count={'CPU': 2})
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 2)
context.context()._physical_devices = None
# Handle empty visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=''))
gpus = config.list_physical_devices('GPU')
gpu_count = len(gpus)
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
','.join(str(i) for i in range(len(gpus))))
context.context()._physical_devices = None
# Handle invalid visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
with self.assertRaisesRegex(ValueError, 'Invalid visible device index'):
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
context.context()._physical_devices = None
# Handle single visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count-1)))
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
str(gpu_count-1))
context.context()._physical_devices = None
def testConfigureCollectiveOps(self):
context.context().configure_collective_ops(
collective_leader='/job:worker/replica:0/task:0',
scoped_allocator_enabled_ops=('CollectiveReduce',),
use_nccl_communication=False,
device_filters=['/job:worker/task:1'])
new_config = context.context().config
# Verify group leader
self.assertEqual('/job:worker/replica:0/task:0',
new_config.experimental.collective_group_leader)
# Verify device filters.
self.assertEqual(['/job:worker/task:1'], new_config.device_filters)
# Verify rewrite options.
new_rewrite_options = new_config.graph_options.rewrite_options
self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
new_rewrite_options.scoped_allocator_optimization)
self.assertEqual(['CollectiveReduce'],
new_rewrite_options.scoped_allocator_opts.enable_op)
class TensorFloat32Test(test.TestCase):
def setUp(self):
super(TensorFloat32Test, self).setUp()
if not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest('TensorFloat-32 requires an NVIDIA GPU with compute '
'capability of at least 8.0')
def tearDown(self):
super(TensorFloat32Test, self).tearDown()
config.enable_tensor_float_32_execution(True)
def test_tensor_float_32_enabled(self):
self.assertTrue(config.tensor_float_32_execution_enabled())
x = array_ops.fill((8, 8), 1 + 2**-20)
y = array_ops.ones((8, 8))
out = math_ops.matmul(x, y)
# In TensorFloat-32, each element of x is rounded to 1, so the output will
# be 8s.
expected = array_ops.fill((8, 8), 8)
self.assertAllEqual(out, expected)
def test_tensor_float_32_disabled(self):
self.assertTrue(config.tensor_float_32_execution_enabled())
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
x = array_ops.fill((8, 8), 1 + 2**-20)
y = array_ops.ones((8, 8))
out = math_ops.matmul(x, y)
expected = array_ops.fill((8, 8), 8 * (1 + 2**-20))
self.assertAllEqual(out, expected)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('dill')
from toolz import (merge, join, pipe, filter, identity, merge_with, take,
partial)
import math
from dask.bag.core import (Bag, lazify, lazify_task, fuse, map, collect,
reduceby, bz2_stream, stream_decompress, reify)
from dask.utils import filetexts, tmpfile, raises
import dask
from pbag import PBag
import dask.bag as db
import shutil
import os
import gzip
import bz2
from collections import Iterator
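# A hand-built dask graph for a three-partition bag; every partition evaluates
# to range(5), so L mirrors the flattened contents for the comparisons below.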
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def inc(x):
return x + 1
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def add(x, y):
return x + y
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert sorted(b._keys()) == sorted(dsk.keys())
def test_map():
c = b.map(inc)
expected = merge(dsk, dict(((c.name, i), (reify, (map, inc, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_map_function_with_multiple_arguments():
b = db.from_sequence([(1, 10), (2, 20), (3, 30)], npartitions=3)
assert list(b.map(lambda x, y: x + y)) == [11, 22, 33]
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(reify, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_remove():
assert list(b.remove(lambda x: x % 2 == 0)) == [1, 3] * 3
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
assert raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
def test_fold_computation():
assert int(b.fold(add)) == sum(L)
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
def test_frequencies():
assert dict(list(b.frequencies())) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
assert list(b.topk(4, key=lambda x: -x)) == [0, 0, 0, 1]
def test_topk_with_non_callable_key():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]
assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert int(b.any()) == True
assert int(b.all()) == False # some zeros exist
def test_mean():
assert b.mean().compute(get=dask.get) == 2.0
assert float(b.mean()) == 2.0
def test_std():
assert b.std().compute(get=dask.get) == math.sqrt(2.0)
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert b.var().compute(get=dask.get) == 2.0
assert float(b.var()) == 2.0
def test_join():
assert list(b.join([1, 2, 3], on_self=isodd, on_other=iseven)) == \
list(join(iseven, [1, 2, 3], isodd, list(b)))
assert list(b.join([1, 2, 3], isodd)) == \
list(join(isodd, [1, 2, 3], isodd, list(b)))
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
def test_lazify_task():
task = (sum, (reify, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (reify, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (reify, (map, inc,
(reify, (filter, iseven, 'y'))))
b = (reify, (map, inc,
(filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
def test_lazify():
a = {'x': (reify, (map, inc,
(reify, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (reify, (map, inc,
(filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
def test_from_url():
a = db.from_url(['http://google.com', 'http://github.com'])
assert a.npartitions == 2
a.compute()
b = db.from_url('http://raw.githubusercontent.com/ContinuumIO/dask/master/README.rst')
assert b.npartitions == 1
assert b'Dask\n' in b.compute()
def test_from_filenames():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert set(line.strip() for line in db.from_filenames(fns)) == \
set('ABCD')
assert set(line.strip() for line in db.from_filenames('a*.log')) == \
set('ABCD')
assert raises(ValueError, lambda: db.from_filenames('non-existent-*-path'))
def test_from_filenames_gzip():
b = db.from_filenames(['foo.json.gz', 'bar.json.gz'])
assert (set(b.dask.values()) ==
set([(list, (gzip.open, os.path.abspath('foo.json.gz'))),
(list, (gzip.open, os.path.abspath('bar.json.gz')))]))
def test_from_filenames_bz2():
b = db.from_filenames(['foo.json.bz2', 'bar.json.bz2'])
assert (set(b.dask.values()) ==
set([(list, (bz2.BZ2File, os.path.abspath('foo.json.bz2'))),
(list, (bz2.BZ2File, os.path.abspath('bar.json.bz2')))]))
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
def test_collect():
a = PBag(identity, 2)
with a:
a.extend([0, 1, 2, 3])
b = PBag(identity, 2)
with b:
b.extend([0, 1, 2, 3])
result = merge(dict(collect(identity, 2, 0, [a, b])),
dict(collect(identity, 2, 1, [a, b])))
assert result == {0: [0, 0],
1: [1, 1],
2: [2, 2],
3: [3, 3]}
def test_groupby():
result = dict(b.groupby(lambda x: x))
    assert result == {0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert b.groupby(lambda x: x).npartitions == b.npartitions
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert result == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
    assert dict(result) == {0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat_nested_lists():
b = db.from_sequence([1, 2, 3]).map(lambda x: x * [1, 2, 3])
assert list(b.concat()) == [1, 2, 3] * sum([1, 2, 3])
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
try:
import dask.dataframe
import pandas as pd
except ImportError:
return
b = db.from_sequence([(1, 2), (10, 20), (100, 200)], npartitions=2)
df = b.to_dataframe()
assert list(df.columns) == list(pd.DataFrame(list(b)).columns)
df = b.to_dataframe(columns=['a', 'b'])
assert df.npartitions == b.npartitions
assert list(df.columns) == ['a', 'b']
assert df.a.compute().values.tolist() == list(b.pluck(0))
assert df.b.compute().values.tolist() == list(b.pluck(1))
b = db.from_sequence([{'a': 1, 'b': 2},
{'a': 10, 'b': 20},
{'a': 100, 'b': 200}], npartitions=2)
df2 = b.to_dataframe()
assert (df2.compute().values == df.compute().values).all()
def test_to_textfiles():
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
for ext, myopen in [('gz', gzip.open), ('bz2', bz2.BZ2File), ('', open)]:
c = b.to_textfiles('_foo/*.' + ext)
assert c.npartitions == b.npartitions
try:
c.compute(get=dask.get)
assert os.path.exists('_foo/1.' + ext)
f = myopen('_foo/1.' + ext, 'r')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
finally:
shutil.rmtree('_foo')
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b]).compute()
assert os.path.exists(a)
assert os.path.exists(b)
with tmpfile() as dirname:
B.to_textfiles(dirname).compute()
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
assert raises(ValueError, lambda: B.to_textfiles(5))
def test_bz2_stream():
text = '\n'.join(map(str, range(10000)))
compressed = bz2.compress(text.encode())
assert list(take(100, bz2_stream(compressed))) == list(map(str, range(100)))
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
assert raises(AttributeError, lambda: b.str.sfohsofhf)
def test_string_namespace_with_unicode():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
def test_str_empty_split():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.split()) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
def test_stream_decompress():
data = 'abc\ndef\n123'.encode()
assert [s.strip() for s in stream_decompress('', data)] == \
['abc', 'def', '123']
assert [s.strip() for s in stream_decompress('bz2', bz2.compress(data))] == \
['abc', 'def', '123']
with tmpfile() as fn:
f = gzip.open(fn, 'wb')
f.write(data)
f.close()
with open(fn, 'rb') as f:
compressed = f.read()
assert [s.strip() for s in stream_decompress('gz', compressed)] == \
[b'abc', b'def', b'123']
def test_map_with_iterator_function():
b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)
def f(L):
for x in L:
yield x + 1
c = b.map(f)
assert list(c) == [[2, 3, 4], [5, 6, 7]]
def test_ensure_compute_output_is_concrete():
b = db.from_sequence([1, 2, 3])
result = b.map(lambda x: x + 1).compute()
assert not isinstance(result, Iterator)
|
|
# -*- encoding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2014 Vincent Gauthier Telecom SudParis.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -------------------------------------------------------------------------------
__author__ = """\n""".join(['Vincent Gauthier <[email protected]>'])
#
# Imports
#
import os
import argparse
import pickle as p
import numpy as np
from progressbar import ProgressBar, Percentage, RotatingMarker, ETA, Bar
# import global variables
import properties
# local import
from utils import *
from tau_leap_latent import stoc_eqs
###############################################################################
#
# Function rate_of_return
#
###############################################################################
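# rho[i, j] is the rate of return from visited community j back to home
# community i. For communities with degree k[j] > 0 the expression below
# simplifies to rho[i, j] = rate * (kmean / k[j]) ** Khi, i.e. the base rate
# rescaled by the community degree relative to the mean; zero-degree
# communities keep the base rate unchanged.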
def rate_of_return(dim, rate, Khi, degree_filename):
with open(degree_filename, 'rb') as pickleFile:
k = p.load(pickleFile)
kmean = np.mean(np.array(k, dtype=np.float))
rho = np.zeros((dim, dim))
for i in xrange(dim):
for j in xrange(dim):
if i != j:
if k[j] == 0:
rho[i, j] = rate
else:
rho[i, j] = 1.0 / \
((1.0 / rate) * (k[j] ** Khi) / (kmean ** Khi))
return rho
###############################################################################
#
# MAIN FUNCTION THAT RUN THE SIMULATION
#
###############################################################################
def run_simulation(
N0,
dim,
tau,
beta,
sigma,
nu,
rho,
total_population,
simulation_end_time,
alphaS,
alphaI,
alphaR,
muS,
muI,
muR,
deltaEI,
initialInfectedCommunity):
# Steps
steps = int(simulation_end_time * (1.0 / tau))
# Compute the initial population distribution
N = compute_population_at_equilibrium(
N0, dim, sigma, nu, rho, total_population)
print 'average population per cellid: ', np.sum(N, axis=0)
#
# init the progress bar
#
widgets = ['Simulation: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=steps).start()
#
#
#
    # Initial population in each state
S = N.copy()
I = np.zeros((dim, dim))
R = np.zeros((dim, dim))
ES = np.zeros((dim, dim))
EI = np.zeros((dim, dim))
ER = np.zeros((dim, dim))
# Infect some nodes
    initial_infection = 100.0
    S[initialInfectedCommunity, initialInfectedCommunity] = S[
        initialInfectedCommunity, initialInfectedCommunity] - initial_infection
    I[initialInfectedCommunity, initialInfectedCommunity] = initial_infection
# Stack the differents S.I.R. variables in one vector
Y = S.reshape(dim * dim).tolist()
Y = np.append(Y, I.reshape(dim * dim).tolist())
Y = np.append(Y, R.reshape(dim * dim).tolist())
Y = np.append(Y, ES.reshape(dim * dim).tolist())
Y = np.append(Y, EI.reshape(dim * dim).tolist())
Y = np.append(Y, ER.reshape(dim * dim).tolist())
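    # Y now holds six contiguous blocks of length dim*dim, in the order
    # S, I, R, ES, EI, ER; stoc_eqs reshapes it back to (6, dim*dim) below.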
Sr = []
Ir = []
Rr = []
ESr = []
EIr = []
ERr = []
InfectionMatrix = np.zeros((steps, 255))
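    # NOTE: stoc_eqs also reads `gamma` (the recovery rate parsed in __main__)
    # from module scope; it is not an explicit parameter of run_simulation.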
for step in xrange(steps):
Ytemp = stoc_eqs(Y, tau, beta, gamma, sigma, nu, rho,
dim, alphaS, alphaI, alphaR, muS, muI, muR, deltaEI)
Ytemp = Ytemp.reshape((6, dim * dim))
Stemp = Ytemp[0].reshape((dim, dim))
Itemp = Ytemp[1].reshape((dim, dim))
Rtemp = Ytemp[2].reshape((dim, dim))
Sr.append(Stemp.sum())
Ir.append(Itemp.sum())
Rr.append(Rtemp.sum())
EStemp = Ytemp[3].reshape((dim, dim))
EItemp = Ytemp[4].reshape((dim, dim))
ERtemp = Ytemp[5].reshape((dim, dim))
ESr.append(EStemp.sum())
EIr.append(EItemp.sum())
ERr.append(ERtemp.sum())
InfectionMatrix[step, :] = Itemp.sum(axis=0)
Y = Ytemp
pbar.update(step)
pbar.finish()
return Sr, Ir, Rr, ESr, EIr, ERr, InfectionMatrix
if __name__ == '__main__':
#
# Parse argument
#
parser = argparse.ArgumentParser(
description='Process SIR simulation with latent states and heterogeneous return probability')
parser.add_argument('--output', help='output directory', required=True)
parser.add_argument(
'--duration', type=int, help='simulation duration in days', required=True)
parser.add_argument(
'--tau', type=float, help='simulation step (fraction of day)', default=1.0 / 5)
parser.add_argument(
'--mu', type=float, help='simulation mu for latent state (fraction of the population)', default=1.0 / 10)
parser.add_argument(
        '--sim-id', type=int, help='simulation run identifier', default=0)
parser.add_argument(
'--cell-id', type=int, help='initial cellID', default=0)
parser.add_argument(
'--gamma', type=float, help='recovery rate', default=1.0 / 3.0)
parser.add_argument(
        '--khi', type=float, help='khi exponent for the degree-dependent return rate', default=-0.5)
args = parser.parse_args()
    # Simulation parameters
simulation_end_time = float(args.duration)
tau = float(args.tau)
muS = float(args.mu)
muI = float(args.mu)
muR = float(args.mu)
gamma = float(args.gamma)
khi = float(args.khi)
simulation_id = int(args.sim_id)
cell_id = args.cell_id
deltaEI = gamma
argsdict = vars(args)
conditions_met = (
args.output and
args.mu and
args.tau and
args.duration and
args.mu and
args.sim_id
)
if conditions_met:
output_dir = argsdict['output']
if output_dir.endswith('\\'):
output_dir = output_dir[:-1]
        # if the output dir doesn't exist, create it
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#
# Start Simulation
#
beta = get_beta(
properties.densitySubPrefectureCensusData,
properties.polygonPointsSubPrefectureCensusData,
properties.subPrefectureNumbering,
properties.r,
properties.c)
with np.errstate(divide='ignore'):
(nu, sigma) = get_transition_probability(
properties.transitionProbability)
rho = rate_of_return(
properties.dim,
properties.return_rate,
khi,
properties.graphDegree)
N0 = initial_population(
properties.areaSubPrefectureCensusData,
properties.densitySubPrefectureCensusData,
properties.polygonPointsSubPrefectureCensusData,
properties.subPrefectureNumbering,
properties.total_population)
#
# Simulation community=0
#
S, I, R, ES, EI, ER, InfectionMatrix = run_simulation(N0,
properties.dim,
tau,
beta,
sigma,
nu,
rho,
properties.total_population,
simulation_end_time,
properties.alphaS,
properties.alphaI,
properties.alphaR,
muS,
muI,
muR,
deltaEI,
cell_id)
A = InfectionMatrix.T
save_results(
S, I, R, ES, EI, ER, A, output_dir + '/' + str(simulation_id))
#####################
#
# end Simulation
#
else:
parser.print_help()
|
|
# Copyright 2016 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import time
import json
import numpy as np
import pytest
import makeunitdb
from ND import celery_app
from params import Params
from ndlib.restutil import *
from postmethods import postNPZ, getNPZ
from ndlib.ndtype import *
from test_settings import *
p = Params()
p.token = 'unittest'
p.channels = ['testchannel']
p.args = (0,1024,0,1024,1,11)
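# Cutout bounds used by postNPZ/getNPZ, presumably (x_start, x_stop, y_start,
# y_stop, z_start, z_stop), matching the 1024x1024x10 test channel created below.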
@pytest.mark.skipif(True, reason='Test not necessary for dev mode')
class Test_Histogram8:
def setup_class(self):
"""Create the unittest database"""
makeunitdb.createTestDB(p.token, p.channels, channel_type=IMAGE, channel_datatype=UINT8, public=True, ximagesize=1024, yimagesize=1024, zimagesize=10, xvoxelres=1.0, yvoxelres=1.0, zvoxelres=10.0, readonly=0)
def teardown_class (self):
"""Destroy the unittest database"""
makeunitdb.deleteTestDB(p.token)
def test_genhistogram (self):
"""Test generating an 8bit histogram"""
image_data = np.random.randint(0, high=255, size=[1, 10, 1024, 1024]).astype(np.uint8)
response = postNPZ(p, image_data)
assert( response.status_code == 200 )
voxarray = getNPZ(p)
# check that the return data matches
assert( np.array_equal(voxarray, image_data) )
# generate the histogram
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Build a get request
response = getURL(url)
except Exception as e:
print e
assert( response.status_code == 200 )
jsonresponse = json.loads(response.content)
# make sure the celery job started
celerystatus = celery_app.AsyncResult(jsonresponse['jobid'])
# wait for histogram generation to finish (either 10 mins or failure)
# note: actual generation time should be more like 0.2 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(jsonresponse['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# now get the histogram
url = 'https://{}/stats/{}/{}/hist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Build a get request
response = getURL(url)
except Exception as e:
print e
assert( response.status_code == 200 )
jsonresponse = json.loads(response.content)
# now see if the two histograms are equivalent
testhist = np.histogram(image_data[image_data > 0], bins=256, range=(0,256))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
@pytest.mark.skipif(True, reason='Test not necessary for dev mode')
class Test_Histogram16:
def setup_class(self):
"""Create the unittest database"""
makeunitdb.createTestDB(p.token, p.channels, channel_type=IMAGE, channel_datatype=UINT16, public=True, ximagesize=1024, yimagesize=1024, zimagesize=10, xvoxelres=1.0, yvoxelres=1.0, zvoxelres=10.0, readonly=0)
def teardown_class (self):
"""Destroy the unittest database"""
makeunitdb.deleteTestDB(p.token)
def test_genhistogram (self):
"""Test generating an 8bit histogram"""
image_data = np.random.randint(0, high=65535, size=[1, 10, 1024, 1024]).astype(np.uint16)
response = postNPZ(p, image_data)
assert( response.status_code == 200 )
voxarray = getNPZ(p)
# check that the return data matches
assert( np.array_equal(voxarray, image_data) )
# generate the histogram
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Build a get request
response = getURL(url)
except Exception as e:
print e
assert( response.status_code == 200 )
jsonresponse = json.loads(response.content)
# make sure the celery job started
celerystatus = celery_app.AsyncResult(jsonresponse['jobid'])
# wait for histogram generation to finish (either 10 mins or failure)
# note: actual generation time should be more like 1.3 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(jsonresponse['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# now get the histogram
url = 'https://{}/stats/{}/{}/hist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Build a get request
response = getURL(url)
except Exception as e:
print e
assert( response.status_code == 200 )
jsonresponse = json.loads(response.content)
# now see if the two histograms are equivalent
testhist = np.histogram(image_data[image_data > 0], bins=65536, range=(0,65536))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
@pytest.mark.skipif(True, reason='Test not necessary for dev mode')
class TestHistogramROI:
def setup_class(self):
"""Create the unittest database"""
makeunitdb.createTestDB(p.token, p.channels, channel_type=IMAGE, channel_datatype=UINT8, public=True, ximagesize=1024, yimagesize=1024, zimagesize=20, xvoxelres=1.0, yvoxelres=1.0, zvoxelres=10.0, readonly=0)
# modify params args to match new data size
p.args = (0,1024,0,1024,1,21)
# post some sample image data
self.image_data = np.random.randint(0, high=255, size=[1, 20, 1024, 1024]).astype(np.uint8)
response = postNPZ(p, self.image_data)
assert( response.status_code == 200 )
voxarray = getNPZ(p)
# check that the return data matches
assert( np.array_equal(voxarray, self.image_data) )
def teardown_class (self):
"""Destroy the unittest database"""
makeunitdb.deleteTestDB(p.token)
def test_genhistogramROI (self):
"""Test generating an 8bit histogram given an ROI"""
# set our ROI (this one spans cuboids)
roi = [ [500, 500, 5], [650, 650, 15] ]
#roistr = "{}-{}".format( ",".join(str(x) for x in roi[0]), ",".join(str(x) for x in roi[1]) )
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
assert(e.reason == 0)
assert( response.code == 200 )
# jsonresponse for ROI returns job info, etc in a list in 'results'
jsonresponse = json.loads(response.read())
# make sure the ROI was received and transcribed correctly
assert( jsonresponse['results'][0]['roi'] == roi )
# make sure the celery job started
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
# wait for histogram generation to finish (up to 60 seconds or until the job fails)
# note: actual generation time should be more like 1.3 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# make sure the ROI exists
url = 'https://{}/stats/{}/{}/hist/roi/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Build a get request
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
assert(e.reason == 0)
jsonresponse = json.loads(response.read())
roifound = False
for roiresp in jsonresponse:
if roiresp == roi:
roifound = True
assert( roifound == True )
# now grab the generated histogram using a get request
roistr = "{}-{}".format( ",".join(str(x) for x in roi[0]), ",".join(str(x) for x in roi[1]) )
url = 'https://{}/stats/{}/{}/hist/roi/{}/'.format( SITE_HOST, p.token, p.channels[0], roistr )
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
assert(e.reason == 0)
jsonresponse = json.loads(response.read())
# now see if the two histograms are equivalent
image_data_roi = self.image_data[0, roi[0][2]:roi[1][2], roi[0][1]:roi[1][1], roi[0][0]:roi[1][0]]
testhist = np.histogram(image_data_roi[image_data_roi > 0], bins=256, range=(0,256))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
def test_genhistogramROICuboid (self):
""" Test generating an 8bit histogram using an ROI inside a single cuboid """
roi = [ [0, 0, 1], [10, 10, 6] ]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
assert(e.reason == 0)
assert( response.code == 200 )
# jsonresponse for ROI returns job info, etc in a list in 'results'
jsonresponse = json.loads(response.read())
# make sure the ROI was received and transcribed correctly
assert( jsonresponse['results'][0]['roi'] == roi )
# make sure the celery job started
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
# wait for histogram generation to finish (up to 60 seconds or until the job fails)
# note: actual generation time should be more like 1.3 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# now grab the generated histogram using a get request
roistr = "{}-{}".format( ",".join(str(x) for x in roi[0]), ",".join(str(x) for x in roi[1]) )
url = 'https://{}/stats/{}/{}/hist/roi/{}/'.format( SITE_HOST, p.token, p.channels[0], roistr )
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
assert(e.reason == 0)
jsonresponse = json.loads(response.read())
# now see if the two histograms are equivalent
image_data_roi = self.image_data[0, roi[0][2]:roi[1][2], roi[0][1]:roi[1][1], roi[0][0]:roi[1][0]]
testhist = np.histogram(image_data_roi[image_data_roi > 0], bins=256, range=(0,256))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
def test_genhistogramROICuboidEnd (self):
""" Test generating an 8bit histogram using an ROI inside a single cuboid at the end of the dataset """
roi = [ [1000, 1000, 18], [1024, 1024, 21] ]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
assert(e.reason == 0)
assert( response.code == 200 )
# jsonresponse for ROI returns job info, etc in a list in 'results'
jsonresponse = json.loads(response.read())
# make sure the ROI was received and transcribed correctly
assert( jsonresponse['results'][0]['roi'] == roi )
# make sure the celery job started
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
# wait for histogram generation to finish (up to 60 seconds or until the job fails)
# note: actual generation time should be more like 1.3 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(jsonresponse['results'][0]['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# now grab the generated histogram using a get request
roistr = "{}-{}".format( ",".join(str(x) for x in roi[0]), ",".join(str(x) for x in roi[1]) )
url = 'https://{}/stats/{}/{}/hist/roi/{}/'.format( SITE_HOST, p.token, p.channels[0], roistr )
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
assert(e.reason == 0)
jsonresponse = json.loads(response.read())
# now see if the two histograms are equivalent
image_data_roi = self.image_data[0, roi[0][2]:roi[1][2], roi[0][1]:roi[1][1], roi[0][0]:roi[1][0]]
testhist = np.histogram(image_data_roi[image_data_roi > 0], bins=256, range=(0,256))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
def test_genhistogramROIError(self):
""" Test error checking in the ROI histogram service """
# post ROI that isn't complete
roi = [ 50, 100, 100]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
assert( response.code != 200 )
except urllib2.HTTPError,e:
print e
assert(e.code == 400)
# post ROI that isn't a cube
roi = [ [50, 50, 18], [10, 10, 5] ]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
assert( response.code != 200 )
except urllib2.HTTPError,e:
print e
assert(e.code == 400)
# post ROI outside of dataset bounds
roi = [ [0, 0, 1], [2000, 2000, 50] ]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': [roi] }))
response = urllib2.urlopen(req)
assert( response.code != 200 )
except urllib2.HTTPError,e:
print e
assert(e.code == 400)
@pytest.mark.skipif(True, reason='Test not necessary for dev mode')
class TestHistogramROIMultiple:
def setup_class(self):
"""Create the unittest database"""
makeunitdb.createTestDB(p.token, p.channels, channel_type=IMAGE, channel_datatype=UINT8, public=True, ximagesize=1024, yimagesize=1024, zimagesize=20, xvoxelres=1.0, yvoxelres=1.0, zvoxelres=10.0, readonly=0)
# modify params args to match new data size
p.args = (0,1024,0,1024,1,21)
# post some sample image data
self.image_data = np.random.randint(0, high=255, size=[1, 20, 1024, 1024]).astype(np.uint8)
response = postNPZ(p, self.image_data)
assert( response.status_code == 200 )
voxarray = getNPZ(p)
# check that the return data matches
assert( np.array_equal(voxarray, self.image_data) )
def teardown_class (self):
"""Destroy the unittest database"""
makeunitdb.deleteTestDB(p.token)
def test_genhistogramROIMultiple (self):
"""Test generating an 8bit histogram given multiple ROIs"""
# set our ROIs (this one spans cuboids)
rois = [
[ [100, 100, 5], [450, 450, 15] ],
[ [500, 500, 5], [650, 650, 15] ],
[ [100, 100, 15], [350, 350, 20] ],
]
url = 'https://{}/stats/{}/{}/genhist/'.format( SITE_HOST, p.token, p.channels[0] )
try:
# Make a POST request
req = urllib2.Request(url, json.dumps({ 'ROI': rois }))
response = urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
assert(e.reason == 0)
assert( response.code == 200 )
# jsonresponse for ROI returns job info, etc in a list in 'results'
jsonresponse = json.loads(response.read())
# loop over returned ROIs
for result in jsonresponse['results']:
roi = result['roi']
# make sure the celery job started
celerystatus = celery_app.AsyncResult(result['jobid'])
# wait for histogram generation to finish (up to 60 seconds or until the job fails)
# note: actual generation time should be more like 1.3 seconds, but there may be other jobs in the queue
count = 0
while celerystatus.state != 'SUCCESS':
time.sleep(1)
celerystatus = celery_app.AsyncResult(result['jobid'])
assert( celerystatus.state != 'FAILURE' )
assert( count != 60 )
count += 1
# grab the generated histogram using a get request
roistr = "{}-{}".format( ",".join(str(x) for x in roi[0]), ",".join(str(x) for x in roi[1]) )
url = 'https://{}/stats/{}/{}/hist/roi/{}/'.format( SITE_HOST, p.token, p.channels[0], roistr )
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
assert(e.reason == 0)
jsonresponse = json.loads(response.read())
# now see if the two histograms are equivalent
image_data_roi = self.image_data[0, roi[0][2]:roi[1][2], roi[0][1]:roi[1][1], roi[0][0]:roi[1][0]]
testhist = np.histogram(image_data_roi[image_data_roi > 0], bins=256, range=(0,256))
# check to see that the bins are equal
assert( np.array_equal( jsonresponse['bins'], testhist[1] ) )
# check to see that the counts are equal
assert( np.array_equal( jsonresponse['hist'], testhist[0] ) )
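# --- Illustrative sketch (not part of the original test suite) ---
# The ROI tests above slice the posted volume, whose axes are ordered
# (channel, z, y, x), using ROI corners given as [x, y, z]; the helper below
# restates that index mapping in one place. The shapes and values are
# illustrative only.
def _roi_slice_sketch():
    import numpy as np
    volume = np.zeros((1, 20, 1024, 1024), dtype=np.uint8)  # (channel, z, y, x)
    roi = [[500, 500, 5], [650, 650, 15]]                    # [x, y, z] corners
    x0, y0, z0 = roi[0]
    x1, y1, z1 = roi[1]
    sub = volume[0, z0:z1, y0:y1, x0:x1]
    assert sub.shape == (z1 - z0, y1 - y0, x1 - x0)          # (10, 150, 150)
    return sub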
|
|
"""
Commands that are available from the connect screen.
"""
import re
import traceback
import time
from collections import defaultdict
from random import getrandbits
from django.conf import settings
from evennia.players.models import PlayerDB
from evennia.objects.models import ObjectDB
from evennia.server.models import ServerConfig
from evennia.comms.models import ChannelDB
from evennia.utils import create, logger, utils, ansi
from evennia.commands.default.muxcommand import MuxCommand
from evennia.commands.cmdhandler import CMD_LOGINSTART
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate",
"CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp")
MULTISESSION_MODE = settings.MULTISESSION_MODE
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
# Helper function to throttle failed connection attempts.
# This can easily be used to limit player creation too,
# (just supply a different storage dictionary), but this
# would also block dummyrunner, so it's not added as default.
_LATEST_FAILED_LOGINS = defaultdict(list)
def _throttle(session, maxlim=None, timeout=None,
storage=_LATEST_FAILED_LOGINS):
"""
This checks the session's address against the storage
dictionary (_LATEST_FAILED_LOGINS by default) to see whether
it has logged too many failed attempts recently.
Args:
session (Session): Session failing
maxlim (int): max number of attempts to allow
timeout (int): number of timeout seconds after
max number of tries has been reached.
Returns:
throttles (bool): True if throttling is active,
False otherwise.
Notes:
If maxlim and/or timeout are set, the function will
just do the comparison, not append a new datapoint.
"""
address = session.address
if isinstance(address, tuple):
address = address[0]
now = time.time()
if maxlim and timeout:
# checking mode
latest_fails = storage[address]
if latest_fails and len(latest_fails) >= maxlim:
# too many fails recently
if now - latest_fails[-1] < timeout:
# too soon - timeout in play
return True
else:
# timeout has passed. Reset faillist
storage[address] = []
return False
else:
# store the time of the latest fail
storage[address].append(time.time())
return False
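# --- Illustrative usage sketch (not part of the original module) ---
# _throttle has two modes, mirrored below: called without maxlim/timeout it
# records a failed attempt for the session's address, and called with both it
# only checks whether that address is currently throttled. FakeSession and the
# local storage dict are stand-ins used for illustration only; they are not
# Evennia API.
def _throttle_usage_sketch():
    from collections import defaultdict

    class FakeSession(object):
        address = ("127.0.0.1", 50000)

    storage = defaultdict(list)
    session = FakeSession()
    for _ in range(5):
        _throttle(session, storage=storage)  # record five failures
    # five recent failures within a 5-minute window -> throttled
    return _throttle(session, maxlim=5, timeout=5 * 60, storage=storage)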
class CmdUnconnectedConnect(MuxCommand):
"""
connect to the game
Usage (at login screen):
connect playername password
connect "player name" "pass word"
Use the create command to first create an account before logging in.
If you have spaces in your name, enclose it in quotes.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
arg_regex = r"\s.*?|$"
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their func() receives
a session object instead of a source_object like all
other types of logged-in commands (this is because
there is no object yet before the player has logged in)
"""
session = self.caller
# check for too many login errors too quick.
if _throttle(session, maxlim=5, timeout=5*60, storage=_LATEST_FAILED_LOGINS):
# timeout is 5 minutes.
session.msg("{RYou made too many connection attempts. Try again in a few minutes.{n")
return
args = self.args
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found, or a guest login
parts = parts[0].split(None, 1)
# Guest login
if len(parts) == 1 and parts[0].lower() == "guest" and settings.GUEST_ENABLED:
try:
# Find an available guest name.
for playername in settings.GUEST_LIST:
if not PlayerDB.objects.filter(username__iexact=playername):
break
playername = None
if playername is None:
session.msg("All guest accounts are in use. Please try again later.")
return
password = "%016x" % getrandbits(64)
home = ObjectDB.objects.get_id(settings.GUEST_HOME)
permissions = settings.PERMISSION_GUEST_DEFAULT
typeclass = settings.BASE_CHARACTER_TYPECLASS
ptypeclass = settings.BASE_GUEST_TYPECLASS
new_player = _create_player(session, playername, password,
permissions, ptypeclass)
if new_player:
_create_character(session, new_player, typeclass,
home, permissions)
session.sessionhandler.login(session, new_player)
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
session.msg(string % (traceback.format_exc()))
logger.log_errmsg(traceback.format_exc())
finally:
return
if len(parts) != 2:
session.msg("\n\r Usage (without <>): connect <name> <password>")
return
playername, password = parts
# Match account name and check password
player = PlayerDB.objects.get_player_from_name(playername)
pswd = None
if player:
pswd = player.check_password(password)
if not (player and pswd):
# No playername or password match
string = "Wrong login information given.\nIf you have spaces in your name or " \
"password, don't forget to enclose it in quotes. Also capitalization matters." \
"\nIf you are new you should first create a new account " \
"using the 'create' command."
session.msg(string)
# this just updates the throttle
_throttle(session, storage=_LATEST_FAILED_LOGINS)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name.lower() for tup in bans)
or
any(tup[2].match(session.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
# actually do the login. This will call all other hooks:
# session.at_login()
# player.at_init() # always called when object is loaded from disk
# player.at_pre_login()
# player.at_first_login() # only once
# player.at_post_login(sessid=sessid)
session.sessionhandler.login(session, player)
class CmdUnconnectedCreate(MuxCommand):
"""
create a new player account
Usage (at login screen):
create <playername> <password>
create "player name" "pass word"
This creates a new player account.
If you have spaces in your name, enclose it in quotes.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
arg_regex = r"\s.*?|$"
def func(self):
"Do checks and create account"
session = self.caller
args = self.args.strip()
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
string = "\n Usage (without <>): create <name> <password>" \
"\nIf <name> or <password> contains spaces, enclose it in quotes."
session.msg(string)
return
playername, password = parts
# sanity checks
if not re.findall('^[\w. @+-]+$', playername) or not (0 < len(playername) <= 30):
# this echoes the restrictions made by django's auth
# module (except not allowing spaces, for convenience of
# logging in).
string = "\n\r Playername can max be 30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only."
session.msg(string)
return
# strip excessive spaces in playername
playername = re.sub(r"\s+", " ", playername).strip()
if PlayerDB.objects.filter(username__iexact=playername):
# player already exists (we also ignore capitalization here)
session.msg("Sorry, there is already a player with the name '%s'." % playername)
return
# Reserve playernames found in GUEST_LIST
if settings.GUEST_LIST and playername.lower() in map(str.lower, settings.GUEST_LIST):
string = "\n\r That name is reserved. Please choose another Playername."
session.msg(string)
return
if not re.findall('^[\w. @+-]+$', password) or not (3 < len(password)):
string = "\n\r Password should be longer than 3 characers. Letters, spaces, digits and @\.\+\-\_ only." \
"\nFor best security, make it longer than 8 characters. You can also use a phrase of" \
"\nmany words if you enclose the password in quotes."
session.msg(string)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==playername.lower() for tup in bans)
or
any(tup[2].match(session.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
# everything's ok. Create the new player account.
try:
permissions = settings.PERMISSION_PLAYER_DEFAULT
typeclass = settings.BASE_CHARACTER_TYPECLASS
new_player = _create_player(session, playername, password, permissions)
if new_player:
if MULTISESSION_MODE < 2:
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
_create_character(session, new_player, typeclass,
default_home, permissions)
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in playername:
string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
else:
string += "\n\nYou can now log with the command 'connect %s <your password>'."
session.msg(string % (playername, playername))
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
session.msg(string % (traceback.format_exc()))
logger.log_errmsg(traceback.format_exc())
class CmdUnconnectedQuit(MuxCommand):
"""
quit when in unlogged-in state
Usage:
quit
We maintain a different version of the quit command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"Simply close the connection."
session = self.caller
#session.msg("Good bye! Disconnecting ...")
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
class CmdUnconnectedLook(MuxCommand):
"""
look when in unlogged-in state
Usage:
look
This is an unconnected version of the look command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"Show the connect screen."
connection_screen = ansi.parse_ansi(utils.random_string_from_module(CONNECTION_SCREEN_MODULE))
if not connection_screen:
connection_screen = "No connection screen found. Please contact an admin."
self.caller.msg(connection_screen)
class CmdUnconnectedHelp(MuxCommand):
"""
get help when in the unconnected state
Usage:
help
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"Shows help"
string = \
"""
You are not yet logged into the game. Commands available at this point:
{wcreate{n - create a new account
{wconnect{n - connect with an existing account
{wlook{n - re-show the connection screen
{whelp{n - show this help
{wencoding{n - change the text encoding to match your client
{wscreenreader{n - make the server more suitable for use with screen readers
{wquit{n - abort the connection
First create an account e.g. with {wcreate Anna c67jHL8p{n
(If you have spaces in your name, use quotes: {wcreate "Anna the Barbarian" c67jHL8p{n)
Next you can connect to the game: {wconnect Anna c67jHL8p{n
You can use the {wlook{n command if you want to see the connect screen again.
"""
self.caller.msg(string)
class CmdUnconnectedEncoding(MuxCommand):
"""
set which text encoding to use in the unconnected state
Usage:
encoding/switches [<encoding>]
Switches:
clear - clear your custom encoding
This sets the text encoding for communicating with Evennia. This is mostly
an issue only if you want to use non-ASCII characters (i.e. letters/symbols
not found in English). If you see that your characters look strange (or you
get encoding errors), you should use this command to set the server
encoding to be the same used in your client program.
Common encodings are utf-8 (default), latin-1, ISO-8859-1 etc.
If you don't submit an encoding, the current encoding will be displayed
instead.
"""
key = "encoding"
aliases = ("@encoding", "@encode")
locks = "cmd:all()"
def func(self):
"""
Sets the encoding.
"""
if self.session is None:
return
if 'clear' in self.switches:
# remove customization
old_encoding = self.session.encoding
if old_encoding:
string = "Your custom text encoding ('%s') was cleared." % old_encoding
else:
string = "No custom encoding was set."
self.session.encoding = "utf-8"
elif not self.args:
# just list the encodings supported
pencoding = self.session.encoding
string = ""
if pencoding:
string += "Default encoding: {g%s{n (change with {w@encoding <encoding>{n)" % pencoding
encodings = settings.ENCODINGS
if encodings:
string += "\nServer's alternative encodings (tested in this order):\n {g%s{n" % ", ".join(encodings)
if not string:
string = "No encodings found."
else:
# change encoding
old_encoding = self.session.encoding
encoding = self.args
self.session.encoding = encoding
string = "Your custom text encoding was changed from '%s' to '%s'." % (old_encoding, encoding)
self.caller.msg(string.strip())
class CmdUnconnectedScreenreader(MuxCommand):
"""
Activate screenreader mode.
Usage:
screenreader
Used to flip screenreader mode on and off before logging in (when
logged in, use @option screenreader on).
"""
key = "screenreader"
aliases = "@screenreader"
def func(self):
"Flips screenreader setting."
self.session.screenreader = not self.session.screenreader
string = "Screenreader mode turned {w%s{n." % ("on" if self.session.screenreader else "off")
self.caller.msg(string)
def _create_player(session, playername, password, permissions, typeclass=None):
"""
Helper function, creates a player of the specified typeclass.
"""
try:
new_player = create.create_player(playername, None, password,
permissions=permissions, typeclass=typeclass)
except Exception, e:
session.msg("There was an error creating the Player:\n%s\n If this problem persists, contact an admin." % e)
logger.log_trace()
return False
# This needs to be called so the engine knows this player is
# logging in for the first time. (so it knows to call the right
# hooks during login later)
utils.init_new_player(new_player)
# join the new player to the public channel
pchannel = ChannelDB.objects.get_channel(settings.DEFAULT_CHANNELS[0]["key"])
if not pchannel.connect(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_errmsg(string)
return new_player
def _create_character(session, new_player, typeclass, home, permissions):
"""
Helper function, creates a character based on a player's name.
This is meant for Guest and MULTISESSION_MODE < 2 situations.
"""
try:
new_character = create.create_object(typeclass, key=new_player.key,
home=home, permissions=permissions)
# set playable character list
new_player.db._playable_characters.append(new_character)
# allow only the character itself and the player to puppet this character (and Immortals).
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, new_player.id))
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a Player."
# We need to set this to have @ic auto-connect to this character
new_player.db._last_puppet = new_character
except Exception, e:
session.msg("There was an error creating the Character:\n%s\n If this problem persists, contact an admin." % e)
logger.log_trace()
return False
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
import sys
import mne
import matplotlib.pyplot as plt
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import time
from functools import partial
from mne import set_config
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
#fs_dir = '/mnt/diskArray/projects/freesurfer'
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
#this_env['FREESURFER_HOME'] = '/usr/local/freesurfer'
raw_dir = '/mnt/scratch/NLR_MEG4'
os.chdir(raw_dir)
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
#for n, s in enumerate(subs):
# run_subprocess(['mne', 'watershed_bem', '--subject', subs[n],'--overwrite'], env=this_env)
# mne.bem.make_watershed_bem(subject = subs[n],subjects_dir=fs_dir,overwrite=True,preflood=20, show=True)
"""USE above code
mri_watershed -h 3 -useSRAS -surf /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/NLR_205_AC /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/mri/T1.mgz /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/ws
"""
"""
Run head_surf.m
"""
# Let's take a look...
#for n, s in enumerate(subs):
# mne.viz.plot_bem(subject=subs[n],subjects_dir=fs_dir,brain_surfaces='white', orientation='coronal')
#for n, s in enumerate(subs):
## os.chdir(os.path.join(fs_dir,subs[n],'bem'))
# run_subprocess(['mne', 'make_scalp_surfaces', '--subject', subs[n],
# '--overwrite','--no-decimate']) # Disable medium and sparse decimations (dense only)
# # otherwise, it gives errors
""" Co-register...
mne.gui.coregistration(tabbed=False,subject=subs[45],subjects_dir=fs_dir)
# Recommended way is to use mne coreg from terminal
"""
# Session 1
# subs are synced up with session1 folder names...
#
session1 = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
#subs = ['NLR_205_AC','NLR_206_LM',
# 'NLR_207_AH','NLR_210_SB','NLR_211_LB'
# ]
#session1 = ['205_ac151208','205_ac160202',
# '206_lm151119',
# '206_lm160113','207_ah160608','207_ah160809','210_sb160822','211_lb160617','211_lb160823'
# ]
#n_subjects = len(subs)
"""
Forward model...
"""
#sourceFlag = np.ones((n_subjects,1))
#%%
#for n, s in enumerate(session1):
# os.chdir(os.path.join(raw_dir,session1[n]))
#
# if s[0:3] == 'nlr':
# subject = s[0:9].upper()
# else:
# subject = 'NLR_' + s[0:6].upper()
#
# os.chdir('inverse')
# fn = 'All_40-sss_eq_'+session1[n]+'-ave.fif'
# evoked = mne.read_evokeds(fn, condition=0,
# baseline=(None,0), kind='average', proj=True)
#
# info = evoked.info
#
# if os.path.isdir('../forward'):
# os.chdir('../forward')
## else:
## temp_src = '/mnt/scratch/NLR_MEG2/' + session1[n] + '/forward'
## temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward'
## shutil.copytree(temp_src, temp_dest)
# trans = session1[n] + '-trans.fif'
## Take a look at the sensors
# mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
# meg_sensors=True, subjects_dir=fs_dir)
#%%
#n = 0
#os.chdir(os.path.join(raw_dir,session1[n]))
#os.chdir('raw_fif')
#pos = mne.chpi.read_head_pos('102_rs160618_1_raw.pos')
#mne.viz.plot_head_positions(pos, mode='traces')
#%%
for n, s in enumerate(session1):
os.chdir(os.path.join(raw_dir,session1[n]))
if s[0:3] == 'nlr':
subject = s[0:9].upper()
else:
subject = 'NLR_' + s[0:6].upper()
os.chdir('inverse')
fn = 'All_40-sss_eq_'+session1[n]+'-ave.fif'
evoked = mne.read_evokeds(fn, condition=0,
baseline=(None,0), kind='average', proj=True)
info = evoked.info
if os.path.isdir('../forward'):
os.chdir('../forward')
else:
temp_src = '/mnt/scratch/NLR_MEG2/' + session1[n] + '/forward'
temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward'
shutil.copytree(temp_src, temp_dest)
trans = session1[n] + '-trans.fif'
# Take a look at the sensors
# mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
# meg_sensors=True, subjects_dir=fs_dir)
### Read source space
# spacing='oct6' #'ico5' # 10242 * 2
fn2 = subject + '-' + 'ico-5' + '-src.fif' # ico-5
if s == '205_ac151123' or s == '205_ac160202' or s == 'nlr_jb227170811': # NLR_205 has too small head for ico-5
fn2 = subject + '-' + 'oct-6' + '-src.fif'
os.chdir(os.path.join(fs_dir,subject,'bem'))
src = mne.read_source_spaces(fn2)
os.chdir(os.path.join(raw_dir,session1[n]))
os.chdir('forward')
#import numpy as np # noqa
#from mayavi import mlab # noqa
#from surfer import Brain # noqa
#
#brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
#surf = brain._geo
#
#vertidx = np.where(src[0]['inuse'])[0]
#
#mlab.points3d(surf.x[vertidx], surf.y[vertidx],
# surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
# Create BEM model
conductivity = (0.3,) # for single layer
#conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject=subject, ico=5, # 5=20484, 4=5120
conductivity=conductivity,
subjects_dir=fs_dir)
bem = mne.make_bem_solution(model)
fn = session1[n] + '-bem-sol.fif'
mne.write_bem_solution(fn,bem)
# Now create forward model
fwd = mne.make_forward_solution(info, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=3.0, n_jobs=18)
fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, copy=True)
fn = session1[n] + '-sss-fwd.fif'
mne.write_forward_solution(fn,fwd,overwrite=True)
#Inverse here
# os.chdir('../covariance')
# fn = session1[n] + '-40-sss-cov.fif'
# cov = mne.read_cov(fn)
#
# os.chdir('../inverse')
# # Free: loose = 1; Loose: loose = 0.2
# inv = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0., depth=0.8, use_cps=True)
#
# fn = session1[n] + '-fixed-depth8-inv.fif'
# mne.minimum_norm.write_inverse_operator(fn,inv)
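# --- Illustrative check (not part of the original pipeline) ---
# One way to sanity-check a forward solution written by the loop above is to
# read it back and look at the leadfield dimensions. The file name below is
# just an example (session1[0]); point it at whichever '-sss-fwd.fif' file you
# want to inspect.
def check_forward_solution(fwd_fname='102_rs160618-sss-fwd.fif'):
    fwd_check = mne.read_forward_solution(fwd_fname)
    leadfield = fwd_check['sol']['data']
    # rows = channels, columns = sources (x1 for fixed, x3 for free orientation)
    print('Leadfield matrix: %d channels x %d source columns'
          % (leadfield.shape[0], leadfield.shape[1]))
    return fwd_check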
|
|
"""Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices')
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except OSError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
# os.forkpty() already set us session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data:
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
fds = [master_fd, STDIN_FILENO]
while True:
rfds, wfds, xfds = select(fds, [], [])
if master_fd in rfds:
data = master_read(master_fd)
if not data: # Reached EOF.
fds.remove(master_fd)
else:
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
if not data:
fds.remove(STDIN_FILENO)
else:
_writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
os.execlp(argv[0], *argv)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except OSError:
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
return os.waitpid(pid, 0)[1]
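# --- Illustrative usage (not part of the original module) ---
# spawn() runs a command on a fresh pty and relays data between the child and
# the calling terminal until the child exits. The command below is arbitrary;
# in normal use this module is imported rather than executed directly.
if __name__ == '__main__':
    status = spawn(['sh', '-c', 'echo pty demo'])
    print('child exit status: %d' % status)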
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import os
from py4j.compat import unicode
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import StreamQueryConfig
from pyflink.table.table_environment import BatchTableEnvironment, StreamTableEnvironment
from pyflink.table.table_config import TableConfig
from pyflink.table.types import DataTypes, RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase
class StreamTableEnvironmentTests(PyFlinkStreamTableTestCase):
def test_register_table_source_scan(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
t_env.register_table_source("Source", csv_source)
result = t_env.scan("Source")
self.assertEqual(
'CatalogTable: (path: [default_catalog, default_database, Source], fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_register_table_sink(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
self.env.execute()
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_from_table_source(self):
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
result = self.t_env.from_table_source(csv_source)
self.assertEqual(
'TableSource: (fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_list_tables(self):
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"Results",
source_sink_utils.TestAppendSink(field_names, field_types))
actual = t_env.list_tables()
expected = ['Orders', 'Results', 'Sinks']
self.assert_equals(actual, expected)
def test_explain(self):
schema = RowType()\
.add('a', DataTypes.INT())\
.add('b', DataTypes.STRING())\
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = t_env.explain(result)
self.assertIsInstance(actual, (str, unicode))
def test_sql_query(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
result = t_env.sql_query("select a + 1, b, c from %s" % source)
result.insert_into("sinks")
self.env.execute()
actual = source_sink_utils.results()
expected = ['2,Hi,Hello', '3,Hello,Hello']
self.assert_equals(actual, expected)
def test_sql_update(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.sql_update("insert into sinks select * from %s" % source)
self.env.execute("test_sql_job")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello', '2,Hello,Hello']
self.assert_equals(actual, expected)
def test_sql_update_with_query_config(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
query_config = StreamQueryConfig()
query_config.with_idle_state_retention_time(
datetime.timedelta(days=1), datetime.timedelta(days=2))
t_env.sql_update("insert into sinks select * from %s" % source, query_config)
self.env.execute("test_sql_job")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello', '2,Hello,Hello']
self.assert_equals(actual, expected)
def test_query_config(self):
query_config = StreamQueryConfig()
query_config.with_idle_state_retention_time(
datetime.timedelta(days=1), datetime.timedelta(days=2))
self.assertEqual(2 * 24 * 3600 * 1000, query_config.get_max_idle_state_retention_time())
self.assertEqual(24 * 3600 * 1000, query_config.get_min_idle_state_retention_time())
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_timezone("Asia/Shanghai")
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(env, table_config)
retrieved_table_config = t_env.get_config()
self.assertFalse(retrieved_table_config.get_null_check())
self.assertEqual(retrieved_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(retrieved_table_config.get_timezone(), "Asia/Shanghai")
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
def test_explain(self):
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Source", csv_source)
source = t_env.scan("Source")
result = source.alias("a, b, c").select("1 + a, b, c")
actual = t_env.explain(result)
self.assertIsInstance(actual, (str, unicode))
def test_table_config(self):
table_config = TableConfig()
table_config.set_timezone("Asia/Shanghai")
table_config.set_max_generated_code_length(64000)
table_config.set_null_check(True)
self.assertTrue(table_config.get_null_check())
self.assertEqual(table_config.get_max_generated_code_length(), 64000)
self.assertEqual(table_config.get_timezone(), "Asia/Shanghai")
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_timezone("Asia/Shanghai")
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, table_config)
retrieved_table_config = t_env.get_config()
self.assertFalse(retrieved_table_config.get_null_check())
self.assertEqual(retrieved_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(retrieved_table_config.get_timezone(), "Asia/Shanghai")
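# --- Illustrative sketch (not part of the original test suite) ---
# Outside the test harness, the flow exercised above looks roughly like this:
# create a StreamTableEnvironment, register a sink, build a table from
# elements and write it out. TestAppendSink is a testing utility reused here
# purely for illustration; a real job would register a production TableSink.
def _stream_table_sketch():
    env = StreamExecutionEnvironment.get_execution_environment()
    t_env = StreamTableEnvironment.create(env, TableConfig())
    field_names = ["a", "b", "c"]
    field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
    t_env.register_table_sink(
        "demo_sink",
        source_sink_utils.TestAppendSink(field_names, field_types))
    t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]) \
        .select("1 + a, b, c") \
        .insert_into("demo_sink")
    env.execute("demo_job")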
|
|
import operator
import random
from functools import reduce
import numpy as np
import itertools
import math
import acpc_python_client as acpc
from tools.constants import NUM_ACTIONS
from tools.game_tree.builder import GameTreeBuilder
from tools.game_tree.node_provider import NodeProvider
from tools.game_tree.nodes import HoleCardsNode, TerminalNode, StrategyActionNode, BoardCardsNode
from tools.hand_evaluation import get_utility
from tools.game_utils import get_num_hole_card_combinations
from tools.utils import is_unique, intersection
try:
from tqdm import tqdm
except ImportError:
pass
NUM_PLAYERS = 2
class CfrActionNode(StrategyActionNode):
def __init__(self, parent, player):
super().__init__(parent, player)
self.current_strategy = np.zeros(NUM_ACTIONS)
self.regret_sum = np.zeros(NUM_ACTIONS)
self.strategy_sum = np.zeros(NUM_ACTIONS)
class CfrNodeProvider(NodeProvider):
def create_action_node(self, parent, player):
return CfrActionNode(parent, player)
class Cfr:
"""Creates new ACPC Poker game strategy using CFR+ algorithm which runs for specified number of iterations.
!!! Currently only limit betting games with up to 5 cards total and 2 players are supported !!!
"""
def __init__(self, game, show_progress=True):
"""Build new CFR instance.
Args:
game (Game): ACPC game definition object.
"""
self.game = game
self.show_progress = show_progress
if game.get_num_players() != 2:
raise AttributeError(
'Only games with 2 players are supported')
if game.get_betting_type() != acpc.BettingType.LIMIT:
raise AttributeError('No-limit betting games not supported')
total_cards_count = game.get_num_hole_cards() \
+ game.get_total_num_board_cards(game.get_num_rounds() - 1)
if total_cards_count > 5:
raise AttributeError('Only games with up to 5 cards are supported')
game_tree_builder = GameTreeBuilder(game, CfrNodeProvider())
if not self.show_progress:
self.game_tree = game_tree_builder.build_tree()
else:
try:
with tqdm(total=1) as progress:
progress.set_description('Building game tree')
self.game_tree = game_tree_builder.build_tree()
progress.update(1)
except NameError:
self.game_tree = game_tree_builder.build_tree()
@staticmethod
def _calculate_node_average_strategy(node, minimal_action_probability):
normalizing_sum = np.sum(node.strategy_sum)
if normalizing_sum > 0:
node.strategy = np.array(node.strategy_sum) / normalizing_sum
if minimal_action_probability:
normalize = False
for a in range(NUM_ACTIONS):
action_probability = node.strategy[a]
if action_probability > 0 and action_probability < minimal_action_probability:
node.strategy[a] = 0
normalize = True
if normalize:
node.strategy = node.strategy / sum(node.strategy)
else:
action_probability = 1.0 / len(node.children)
node.strategy = [
action_probability if a in node.children else 0
for a in range(NUM_ACTIONS)]
@staticmethod
def _calculate_tree_average_strategy(node, minimal_action_probability):
if isinstance(node, CfrActionNode):
Cfr._calculate_node_average_strategy(node, minimal_action_probability)
if node.children:
for child in node.children.values():
Cfr._calculate_tree_average_strategy(child, minimal_action_probability)
def _get_algorithm_name(self):
return 'CFR'
def train(
self,
iterations,
weight_delay=700,
checkpoint_iterations=None,
checkpoint_callback=lambda *args: None,
minimal_action_probability=None):
"""Run CFR for given number of iterations.
The trained tree can be found by retrieving the game_tree
property from this object. The result strategy is stored
in average_strategy of each ActionNode in game tree.
This method can be called multiple times on one instance
to train more. This can be used for evaluation during training
and to make number of training iterations dynamic.
Args:
iterations (int): Number of iterations.
show_progress (bool): Show training progress bar.
"""
if not self.show_progress:
iterations_iterable = range(iterations)
else:
try:
iterations_iterable = tqdm(range(iterations))
iterations_iterable.set_description('%s training' % self._get_algorithm_name())
except NameError:
iterations_iterable = range(iterations)
if iterations <= weight_delay:
raise AttributeError('Number of iterations must be larger than weight delay')
if checkpoint_iterations is None or checkpoint_iterations <= 0 or checkpoint_iterations > iterations:
checkpoint_iterations = iterations
iterations_left_to_checkpoint = weight_delay + checkpoint_iterations
checkpoint_index = 0
for i in iterations_iterable:
self.weight = max(i - weight_delay, 0)
for player in range(2):
self._start_iteration(player)
iterations_left_to_checkpoint -= 1
if iterations_left_to_checkpoint == 0 or i == iterations - 1:
Cfr._calculate_tree_average_strategy(self.game_tree, minimal_action_probability)
checkpoint_callback(self.game_tree, checkpoint_index, i + 1)
checkpoint_index += 1
iterations_left_to_checkpoint = checkpoint_iterations
return self.game_tree
def _start_iteration(self, player):
self._cfr(
player,
[self.game_tree] * NUM_PLAYERS,
None,
[],
[False] * NUM_PLAYERS,
1)
def _cfr(self, player, nodes, hole_cards, board_cards, players_folded, opponent_reach_prob):
node_type = type(nodes[0])
if node_type == TerminalNode:
return self._cfr_terminal(
player,
nodes,
hole_cards,
board_cards,
players_folded,
opponent_reach_prob)
elif node_type == HoleCardsNode:
return self._cfr_hole_cards(
player,
nodes,
hole_cards,
board_cards,
players_folded,
opponent_reach_prob)
elif node_type == BoardCardsNode:
return self._cfr_board_cards(
player,
nodes,
hole_cards,
board_cards,
players_folded,
opponent_reach_prob)
else:
return self._cfr_action(
player,
nodes,
hole_cards,
board_cards,
players_folded,
opponent_reach_prob)
def _cfr_terminal(self, player, nodes, hole_cards, board_cards, players_folded, opponent_reach_prob):
return get_utility(
hole_cards,
board_cards,
players_folded,
nodes[0].pot_commitment)[player] * opponent_reach_prob
def _cfr_hole_cards(self, player, nodes, hole_cards, board_cards, players_folded, opponent_reach_prob):
hole_card_combination_probability = 1 / get_num_hole_card_combinations(self.game)
        possible_hole_cards = [node.children for node in nodes]
        hole_card_combinations = filter(lambda comb: is_unique(*comb), itertools.product(*possible_hole_cards))
value_sum = 0
for hole_cards_combination in hole_card_combinations:
next_nodes = [node.children[hole_cards_combination[i]] for i, node in enumerate(nodes)]
player_utility = self._cfr(
player,
next_nodes,
hole_cards_combination,
board_cards,
players_folded,
opponent_reach_prob)
value_sum += player_utility * hole_card_combination_probability
return value_sum
def _cfr_board_cards(self, player, nodes, hole_cards, board_cards, players_folded, opponent_reach_prob):
possible_board_cards = intersection(*map(lambda node: node.children, nodes))
board_cards_combination_probability = 1 / len(possible_board_cards)
value_sum = 0
for next_board_cards in possible_board_cards:
selected_board_cards = sorted(next_board_cards)
selected_board_cards_key = tuple(selected_board_cards)
            next_nodes = [node.children[selected_board_cards_key] for node in nodes]
player_utility = self._cfr(
player,
next_nodes,
hole_cards,
board_cards + list(selected_board_cards),
players_folded,
opponent_reach_prob)
value_sum += player_utility * board_cards_combination_probability
return value_sum
@staticmethod
def _regret_matching(nodes):
node = nodes[nodes[0].player]
normalizing_sum = np.sum(node.regret_sum)
if normalizing_sum > 0:
node.current_strategy = node.regret_sum / normalizing_sum
else:
action_probability = 1 / len(node.children)
current_strategy = np.zeros(NUM_ACTIONS)
for a in node.children:
current_strategy[a] = action_probability
node.current_strategy = current_strategy
def _get_current_strategy(self, nodes):
return nodes[nodes[0].player].current_strategy
def _get_opponent_strategy(self, player, nodes):
return self._get_current_strategy(nodes)
def _cfr_action(self, player, nodes, hole_cards, board_cards, players_folded, opponent_reach_prob):
node_player = nodes[0].player
node = nodes[node_player]
node_util = 0
if player == node_player:
current_strategy = self._get_current_strategy(nodes)
util = np.zeros(NUM_ACTIONS)
for a in node.children:
if a == 0:
next_players_folded = list(players_folded)
next_players_folded[node_player] = True
else:
next_players_folded = players_folded
action_util = self._cfr(
player,
[node.children[a] for node in nodes],
hole_cards,
board_cards,
next_players_folded,
opponent_reach_prob)
util[a] = action_util
node_util += current_strategy[a] * action_util
for a in node.children:
node.regret_sum[a] = max(node.regret_sum[a] + util[a] - node_util, 0)
else:
Cfr._regret_matching(nodes)
current_strategy = self._get_current_strategy(nodes)
node.strategy_sum += opponent_reach_prob * current_strategy * self.weight
opponent_strategy = self._get_opponent_strategy(player, nodes)
for a in node.children:
if a == 0:
next_players_folded = list(players_folded)
next_players_folded[node_player] = True
else:
next_players_folded = players_folded
node_util += self._cfr(
player,
[node.children[a] for node in nodes],
hole_cards,
board_cards,
next_players_folded,
opponent_reach_prob * opponent_strategy[a])
return node_util
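if __name__ == '__main__':
    # Minimal, self-contained sketch (illustrative only, not part of the
    # original module): the regret-matching step used by Cfr._regret_matching
    # turns accumulated positive regrets into a strategy. With CFR+ the regret
    # sums are already clipped at zero during training, so the clipping below
    # is shown only for completeness.
    example_regret_sum = np.array([0.0, 2.0, 6.0])  # e.g. fold / call / raise
    positive_regrets = np.maximum(example_regret_sum, 0)
    if positive_regrets.sum() > 0:
        example_strategy = positive_regrets / positive_regrets.sum()
    else:
        example_strategy = np.full(len(example_regret_sum), 1 / len(example_regret_sum))
    print('Regret-matched strategy:', example_strategy)  # -> [0.   0.25 0.75]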
|
|
"""Support for computing limited differences needed in reconstruction
of slopes in constructing interface states."""
import numpy as np
import sys
def limit(data, myg, idir, limiter):
""" a single driver that calls the different limiters based on the value
of the limiter input variable."""
if limiter == 0:
return nolimit(data, myg, idir)
elif limiter == 1:
return limit2(data, myg, idir)
else:
return limit4(data, myg, idir)
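# Typical call from a solver (sketch only; `myg` is the patch's Grid2d object
# and `dens` a cell-centred array defined on it -- both are assumed here):
#     ldelta_rx = limit(dens, myg, 1, limiter=1)   # limited x-slopes
#     ldelta_ry = limit(dens, myg, 2, limiter=1)   # limited y-slopes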
def well_balance(q, myg, limiter, iv, grav):
"""subtract off the hydrostatic pressure before limiting. Note, this
only considers the y direction."""
if limiter != 1:
sys.exit("well-balanced only works for limiter == 1")
p1 = myg.scratch_array()
p1_jp1 = myg.scratch_array()
p1_jm1 = myg.scratch_array()
p1.v(buf=4)[:, :] = 0.0
p1_jp1.v(buf=3)[:, :] = q.jp(1, buf=3, n=iv.ip) - (q.v(buf=3, n=iv.ip) +
0.5*myg.dy*(q.v(buf=3, n=iv.irho) + q.jp(1, buf=3, n=iv.irho))*grav)
p1_jm1.v(buf=3)[:, :] = q.jp(-1, buf=3, n=iv.ip) - (q.v(buf=3, n=iv.ip) -
0.5*myg.dy*(q.v(buf=3, n=iv.irho) + q.jp(-1, buf=3, n=iv.irho))*grav)
# now limit p1 using these -- this is the 2nd order MC limiter
lda_tmp = myg.scratch_array()
dc = myg.scratch_array()
dl = myg.scratch_array()
dr = myg.scratch_array()
dc.v(buf=2)[:, :] = 0.5*(p1_jp1.v(buf=2) - p1_jm1.v(buf=2))
dl.v(buf=2)[:, :] = p1_jp1.v(buf=2) - p1.v(buf=2)
dr.v(buf=2)[:, :] = p1.v(buf=2) - p1_jm1.v(buf=2)
d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)
dt = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)
lda_tmp.v(buf=myg.ng)[:, :] = np.where(dl*dr > 0.0, dt, 0.0)
return lda_tmp
def nolimit(a, myg, idir):
""" just a centered difference without any limiting """
lda = myg.scratch_array()
if idir == 1:
lda.v(buf=2)[:, :] = 0.5*(a.ip(1, buf=2) - a.ip(-1, buf=2))
elif idir == 2:
lda.v(buf=2)[:, :] = 0.5*(a.jp(1, buf=2) - a.jp(-1, buf=2))
return lda
def limit2(a, myg, idir):
""" 2nd order monotonized central difference limiter """
lda = myg.scratch_array()
dc = myg.scratch_array()
dl = myg.scratch_array()
dr = myg.scratch_array()
if idir == 1:
dc.v(buf=2)[:, :] = 0.5*(a.ip(1, buf=2) - a.ip(-1, buf=2))
dl.v(buf=2)[:, :] = a.ip(1, buf=2) - a.v(buf=2)
dr.v(buf=2)[:, :] = a.v(buf=2) - a.ip(-1, buf=2)
elif idir == 2:
dc.v(buf=2)[:, :] = 0.5*(a.jp(1, buf=2) - a.jp(-1, buf=2))
dl.v(buf=2)[:, :] = a.jp(1, buf=2) - a.v(buf=2)
dr.v(buf=2)[:, :] = a.v(buf=2) - a.jp(-1, buf=2)
d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)
dt = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)
lda.v(buf=myg.ng)[:, :] = np.where(dl*dr > 0.0, dt, 0.0)
return lda
def limit4(a, myg, idir):
""" 4th order monotonized central difference limiter """
lda_tmp = limit2(a, myg, idir)
lda = myg.scratch_array()
dc = myg.scratch_array()
dl = myg.scratch_array()
dr = myg.scratch_array()
if idir == 1:
dc.v(buf=2)[:, :] = (2./3.)*(a.ip(1, buf=2) - a.ip(-1, buf=2) -
0.25*(lda_tmp.ip(1, buf=2) + lda_tmp.ip(-1, buf=2)))
dl.v(buf=2)[:, :] = a.ip(1, buf=2) - a.v(buf=2)
dr.v(buf=2)[:, :] = a.v(buf=2) - a.ip(-1, buf=2)
elif idir == 2:
dc.v(buf=2)[:, :] = (2./3.)*(a.jp(1, buf=2) - a.jp(-1, buf=2) -
0.25*(lda_tmp.jp(1, buf=2) + lda_tmp.jp(-1, buf=2)))
dl.v(buf=2)[:, :] = a.jp(1, buf=2) - a.v(buf=2)
dr.v(buf=2)[:, :] = a.v(buf=2) - a.jp(-1, buf=2)
d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)
dt = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)
lda.v(buf=myg.ng)[:, :] = np.where(dl*dr > 0.0, dt, 0.0)
return lda
def flatten(myg, q, idir, ivars, rp):
""" compute the 1-d flattening coefficients """
xi = myg.scratch_array()
z = myg.scratch_array()
t1 = myg.scratch_array()
t2 = myg.scratch_array()
delta = rp.get_param("compressible.delta")
z0 = rp.get_param("compressible.z0")
z1 = rp.get_param("compressible.z1")
smallp = 1.e-10
if idir == 1:
t1.v(buf=2)[:, :] = abs(q.ip(1, n=ivars.ip, buf=2) -
q.ip(-1, n=ivars.ip, buf=2))
t2.v(buf=2)[:, :] = abs(q.ip(2, n=ivars.ip, buf=2) -
q.ip(-2, n=ivars.ip, buf=2))
z[:, :] = t1/np.maximum(t2, smallp)
t2.v(buf=2)[:, :] = t1.v(buf=2)/np.minimum(q.ip(1, n=ivars.ip, buf=2),
q.ip(-1, n=ivars.ip, buf=2))
t1.v(buf=2)[:, :] = q.ip(-1, n=ivars.iu, buf=2) - q.ip(1, n=ivars.iu, buf=2)
elif idir == 2:
t1.v(buf=2)[:, :] = abs(q.jp(1, n=ivars.ip, buf=2) -
q.jp(-1, n=ivars.ip, buf=2))
t2.v(buf=2)[:, :] = abs(q.jp(2, n=ivars.ip, buf=2) -
q.jp(-2, n=ivars.ip, buf=2))
z[:, :] = t1/np.maximum(t2, smallp)
t2.v(buf=2)[:, :] = t1.v(buf=2)/np.minimum(q.jp(1, n=ivars.ip, buf=2),
q.jp(-1, n=ivars.ip, buf=2))
t1.v(buf=2)[:, :] = q.jp(-1, n=ivars.iv, buf=2) - q.jp(1, n=ivars.iv, buf=2)
xi.v(buf=myg.ng)[:, :] = np.minimum(1.0, np.maximum(0.0, 1.0 - (z - z0)/(z1 - z0)))
xi[:, :] = np.where(np.logical_and(t1 > 0.0, t2 > delta), xi, 1.0)
return xi
def flatten_multid(myg, q, xi_x, xi_y, ivars):
""" compute the multidimensional flattening coefficient """
xi = myg.scratch_array()
px = np.where(q.ip(1, n=ivars.ip, buf=2) -
q.ip(-1, n=ivars.ip, buf=2) > 0,
xi_x.ip(-1, buf=2), xi_x.ip(1, buf=2))
py = np.where(q.jp(1, n=ivars.ip, buf=2) -
q.jp(-1, n=ivars.ip, buf=2) > 0,
xi_y.jp(-1, buf=2), xi_y.jp(1, buf=2))
xi.v(buf=2)[:, :] = np.minimum(np.minimum(xi_x.v(buf=2), px),
np.minimum(xi_y.v(buf=2), py))
return xi
# Constants for the WENO reconstruction
# NOTE: integer division laziness means this WILL fail on python2
C_3 = np.array([1, 2]) / 3
a_3 = np.array([[3, -1],
[1, 1]]) / 2
sigma_3 = np.array([[[1, 0],
[-2, 1]],
[[1, 0],
[-2, 1]]])
C_5 = np.array([1, 6, 3]) / 10
a_5 = np.array([[11, -7, 2],
[2, 5, -1],
[-1, 5, 2]]) / 6
sigma_5 = np.array([[[40, 0, 0],
[-124, 100, 0],
[44, -76, 16]],
[[16, 0, 0],
[-52, 52, 0],
[20, -52, 16]],
[[16, 0, 0],
[-76, 100, 0],
[44, -124, 40]]]) / 12
C_all = {2: C_3,
3: C_5}
a_all = {2: a_3,
3: a_5}
sigma_all = {2: sigma_3,
3: sigma_5}
def weno_upwind(q, order):
"""
Perform upwinded (left biased) WENO reconstruction
Parameters
----------
q : np array
input data
order : int
WENO order (k)
Returns
-------
    q_plus : float
        the reconstructed value at the right interface of the central cell
"""
a = a_all[order]
C = C_all[order]
sigma = sigma_all[order]
epsilon = 1e-16
alpha = np.zeros(order)
beta = np.zeros(order)
q_stencils = np.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k] += sigma[k, l, m] * q[order-1+k-l] * q[order-1+k-m]
alpha[k] = C[k] / (epsilon + beta[k]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[order-1+k-l]
w = alpha / np.sum(alpha)
return np.dot(w, q_stencils)
def weno(q, order):
"""
Perform WENO reconstruction
Parameters
----------
q : np array
input data with 3 ghost zones
order : int
WENO order (k)
Returns
-------
    q_minus, q_plus : np array
        data reconstructed to the left / right respectively
"""
    Npoints = q.shape[0]
q_minus = np.zeros_like(q)
q_plus = np.zeros_like(q)
for i in range(order, Npoints-order):
q_plus[i] = weno_upwind(q[i+1-order:i+order], order)
q_minus[i] = weno_upwind(q[i+order-1:i-order:-1], order)
return q_minus, q_plus
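if __name__ == "__main__":
    # Minimal self-check (illustrative only, not part of the original module):
    # reconstruct a smooth profile with 5th-order WENO (order=3). The
    # right-interface values should differ from the cell-centre data by
    # roughly half a cell width times the local slope, i.e. O(dx).
    x = np.linspace(0.0, 2.0 * np.pi, 64)
    q = np.sin(x)
    q_minus, q_plus = weno(q, 3)
    interior = slice(3, -3)
    print("max |q_plus - q| in the interior:",
          np.max(np.abs(q_plus[interior] - q[interior])))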
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.stacktrace import (
Frame, Stacktrace, get_context, slim_frame_data
)
from sentry.models import Event
from sentry.testutils import TestCase
class GetContextTest(TestCase):
def test_works_with_empty_filename(self):
result = get_context(0, 'hello world')
assert result == [(0, 'hello world')]
class StacktraceTest(TestCase):
@fixture
def interface(self):
return Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/bar.py'
},
{
'filename': 'foo/baz.py',
'lineno': 1,
'in_app': True,
}
]))
def test_legacy_interface(self):
# Simple test to ensure legacy data works correctly with the ``Frame``
# objects
event = self.event
interface = Stacktrace.to_python(event.data['sentry.interfaces.Stacktrace'])
assert len(interface.frames) == 5
assert interface == event.interfaces['sentry.interfaces.Stacktrace']
def test_requires_filename(self):
with self.assertRaises(AssertionError):
Stacktrace.to_python(dict(frames=[{}]))
Stacktrace.to_python(dict(frames=[{
'filename': 'foo.py',
}]))
Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
}]))
def test_allows_abs_path_without_filename(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'abs_path': 'foo/bar/baz.py',
}]))
frame = interface.frames[0]
assert frame.filename == 'foo/bar/baz.py'
assert frame.abs_path == frame.filename
def test_coerces_url_filenames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'http://foo.com/foo.js',
}]))
frame = interface.frames[0]
assert frame.filename == '/foo.js'
assert frame.abs_path == 'http://foo.com/foo.js'
def test_does_not_overwrite_filename(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.js',
'abs_path': 'http://foo.com/foo.js',
}]))
frame = interface.frames[0]
assert frame.filename == 'foo.js'
assert frame.abs_path == 'http://foo.com/foo.js'
def test_ignores_results_with_empty_path(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'http://foo.com',
}]))
frame = interface.frames[0]
assert frame.filename == 'http://foo.com'
assert frame.abs_path == frame.filename
def test_serialize_returns_frames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
}]))
result = interface.to_json()
assert 'frames' in result
def test_hash_without_system_frames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
'in_app': True,
}, {
'lineno': 1,
'filename': 'bar.py',
'in_app': None,
}]))
result = interface.get_hash(system_frames=False)
assert result == ['foo.py', 1]
result = interface.get_hash(system_frames=True)
assert result == ['foo.py', 1, 'bar.py', 1]
def test_compute_hashes(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
'in_app': True,
}, {
'lineno': 1,
'filename': 'bar.py',
'in_app': None,
}]))
result = interface.compute_hashes('python')
assert result == [['foo.py', 1, 'bar.py', 1], ['foo.py', 1]]
def test_get_hash_with_only_required_vars(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 1])
def test_get_hash_sanitizes_block_functions(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': 'foo.py',
'function': 'block in _conditional_callback_around_233',
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'block'])
def test_get_hash_sanitizes_versioned_filenames(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': '/data/foo/releases/20140114151955/app/views/foo.html.erb',
'context_line': '<% if @hotels.size > 0 %>',
})
result = interface.get_hash()
self.assertEquals(result, [
'/data/foo/releases/<version>/app/views/foo.html.erb',
'<% if @hotels.size > 0 %>',
])
interface = Frame.to_python({
'filename': '20140114151955/app/views/foo.html.erb',
'context_line': '<% if @hotels.size > 0 %>',
})
result = interface.get_hash()
self.assertEquals(result, [
'<version>/app/views/foo.html.erb',
'<% if @hotels.size > 0 %>',
])
def test_get_hash_ignores_java8_lambda_module(self):
interface = Frame.to_python({
'module': 'foo.bar.Baz$$Lambda$40/1673859467',
'function': 'call',
})
result = interface.get_hash()
self.assertEquals(result, [
'<module>',
'call',
])
def test_get_hash_ignores_java8_lambda_function(self):
interface = Frame.to_python({
'module': 'foo.bar.Baz',
'function': 'lambda$work$1',
})
result = interface.get_hash()
self.assertEquals(result, [
'foo.bar.Baz',
'<function>',
])
def test_get_hash_sanitizes_erb_templates(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': 'foo.html.erb',
'function': '_foo_html_erb__3327151541118998292_70361296749460',
})
result = interface.get_hash()
self.assertEquals(result, [
'foo.html.erb', '_foo_html_erb__<anon>_<anon>',
])
def test_get_hash_ignores_filename_if_http(self):
interface = Frame.to_python({
'context_line': 'hello world',
'filename': 'http://foo.com/foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_ignores_filename_if_https(self):
interface = Frame.to_python({
'context_line': 'hello world',
'filename': 'https://foo.com/foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_ignores_filename_if_abs_path_is_http(self):
interface = Frame.to_python({
'context_line': 'hello world',
'abs_path': 'https://foo.com/foo.py',
'filename': 'foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_uses_module_over_filename(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
'module': 'foo'
})
result = interface.get_hash()
self.assertEquals(result, ['foo', 1])
def test_get_hash_uses_function_over_lineno(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
'function': 'bar'
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'bar'])
def test_get_hash_uses_context_line_over_function(self):
interface = Frame.to_python({
'context_line': 'foo bar',
'lineno': 1,
'filename': 'foo.py',
'function': 'bar'
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'foo bar'])
def test_get_hash_discards_seemingly_useless_stack(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': '<HTML>',
'lineno': 1,
'abs_path': 'http://example.com/foo',
'filename': 'foo',
'function': '?',
}],
})
result = interface.get_hash()
assert result == []
def test_get_hash_does_not_discard_non_urls(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': '<HTML>',
'lineno': 1,
'abs_path': 'foo',
'filename': 'foo',
'function': '?',
}],
})
result = interface.get_hash()
assert result != []
def test_get_hash_does_not_discard_js_frames(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': 'function foo() {}',
'lineno': 1,
'abs_path': 'http://example.com/foo.js',
'filename': 'foo.js',
'function': '?',
}],
})
result = interface.get_hash()
assert result != []
@mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
def test_to_string_returns_stacktrace(self, get_stacktrace):
event = mock.Mock(spec=Event())
interface = Stacktrace(frames=[])
result = interface.to_string(event)
get_stacktrace.assert_called_once_with(event, system_frames=False, max_frames=10)
self.assertEquals(result, get_stacktrace.return_value)
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
@mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
def test_get_traceback_response(self, get_stacktrace):
event = mock.Mock(spec=Event())
event.message = 'foo'
get_stacktrace.return_value = 'bar'
interface = Stacktrace.to_python(dict(frames=[{'lineno': 1, 'filename': 'foo.py'}]))
result = interface.get_traceback(event)
get_stacktrace.assert_called_once_with(event, newest_first=None)
self.assertEquals(result, 'foo\n\nbar')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_only_filename(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo'}, {'filename': 'bar'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo"\n File "bar"')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_module(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'module': 'foo'}, {'module': 'bar'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n Module "foo"\n Module "bar"')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_and_function(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo', 'function': 'biz'}, {'filename': 'bar', 'function': 'baz'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", in biz\n File "bar", in baz')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_function_lineno_and_context(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[
{'filename': 'foo', 'function': 'biz', 'lineno': 3, 'context_line': ' def foo(r):'},
{'filename': 'bar', 'function': 'baz', 'lineno': 5, 'context_line': ' return None'},
]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", line 3, in biz\n def foo(r):\n File "bar", line 5, in baz\n return None')
class SlimFrameDataTest(TestCase):
def test_under_max(self):
value = {'frames': [{'filename': 'foo'}]}
slim_frame_data(value)
assert len(value['frames']) == 1
assert value.get('frames_omitted') is None
def test_over_max(self):
values = []
for n in xrange(5):
values.append({
'filename': 'frame %d' % n,
'vars': {},
'pre_context': [],
'post_context': [],
})
value = {'frames': values}
slim_frame_data(value, 4)
assert len(value['frames']) == 5
for value, num in zip(values[:2], xrange(2)):
assert value['filename'] == 'frame %d' % num
assert value['vars'] is not None
assert value['pre_context'] is not None
assert value['post_context'] is not None
assert values[2]['filename'] == 'frame 2'
assert 'vars' not in values[2]
assert 'pre_context' not in values[2]
assert 'post_context' not in values[2]
for value, num in zip(values[3:], xrange(3, 5)):
assert value['filename'] == 'frame %d' % num
assert value['vars'] is not None
assert value['pre_context'] is not None
assert value['post_context'] is not None
|
|
from django.test import TestCase
from django.db.utils import IntegrityError
from pyday_social_network.models import PyDayUser, FollowingRelation
from pyday_social_network.views import *
from django.test import Client
from django.core.urlresolvers import reverse
# from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.client import RequestFactory
from pyday_social_network.services import *
from pyday_social_network.forms import RegisterUserForm
from datetime import datetime
from pyday.settings import GREETINGS
class PyDayUserTest(TestCase):
def setUp(self):
self.user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
def test_user_creation(self):
self.assertEqual(self.user.first_name, "MynameisWhat")
self.assertEqual(self.user.last_name, "MynameisWho")
self.assertEqual(self.user.email, "[email protected]")
self.assertNotEqual(self.user.password, "secret")
def test_user_creation_save(self):
PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.assertEqual(len(PyDayUser.objects.all()), 2)
def test_invalid_email(self):
with self.assertRaises(ValueError):
PyDayUser.objects._create_user("", "secret",
"MynameisWhat",
"MynameisWho")
def test_unique_email(self):
with self.assertRaises(IntegrityError):
PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
class FollowingRelationTest(TestCase):
def setUp(self):
self.user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
def test_follow(self):
self.user.follow(self.user2.id)
self.assertEqual(len(FollowingRelation.objects.all()), 1)
rel = FollowingRelation.objects.get(pk=1)
self.assertEqual(rel.followed, self.user2)
self.assertEqual(rel.follower, self.user)
def test_unfollow(self):
self.user.follow(self.user2.id)
self.user.unfollow(self.user2.id)
self.assertEqual(len(FollowingRelation.objects.all()), 0)
def test_unfollow_not_followed(self):
self.assertFalse(self.user.unfollow(self.user2.id)[0])
def test_follow_followed(self):
self.user.follow(self.user2.id)
self.assertFalse(self.user.follow(self.user2.id)[0])
def test_following_followers(self):
self.user.follow(self.user2.id)
self.assertEqual(self.user.following, [self.user2])
self.assertEqual(self.user2.followers, [self.user])
def test_friends(self):
self.user.follow(self.user2.id)
self.user2.follow(self.user.id)
self.assertEqual(self.user.friends, [self.user2])
self.assertEqual(self.user2.friends, [self.user])
# Song...
class RegisterViewTest(TestCase):
def setUp(self):
self.client = Client()
def test_post(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:register_login'),
{'first_name': 'ha', 'last_name': 'ha',
'email': '[email protected]',
'password': '1'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(PyDayUser.objects.all()), 1)
self.assertTrue(PyDayUser.objects.get(email='[email protected]'))
self.assertContains(response, "You have registered")
def test_post_not_successful(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:register_login'),
{'first_name': 'ha', 'last_name': 'ha',
'email': '', 'password': '1'})
class UploadPictureViewTest(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.client.login(email='[email protected]', password='secret')
def test_post(self):
picture = open('./media/pictures/profile.jpg', 'rb')
response = self.client.post(reverse('pyday_social_network:upload_picture'),
{'picture': picture})
self.assertNotEqual(PyDayUser.objects.get(
email="[email protected]").picture, PyDayUser.objects.get(email="[email protected]").picture)
def tearDown(self):
PyDayUser.objects.get(email='[email protected]').delete()
class ViewsTestNotLogged(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
def test_login_redirect(self):
response = self.client.get(
reverse('pyday_social_network:login'), follow=True)
self.assertEqual(response.redirect_chain,
[('/social/main', 302), ('/social/main/', 301),
('/social/register/?next=/social/main/', 302)])
def test_login_redirect_anonymous(self):
self.client.login(email='[email protected]', password='secret')
with self.assertTemplateUsed('main.html'):
response = self.client.get(
reverse('pyday_social_network:login'), follow=True)
self.assertEqual(response.redirect_chain,
[('/social/main', 302), ('/social/main/', 301)])
def test_login_success(self):
with self.assertTemplateUsed('main.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "[email protected]",
'password': 'secret'},
follow=True)
self.assertEqual(response.redirect_chain, [('/social/main', 302),
('/social/main/', 301)])
def test_login_fail(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "[email protected]",
'password': 'secretaaa'},
follow=True)
self.assertContains(response, "Invalid email/password")
def test_login_invalid_form(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'password': 'secret'},
follow=True)
self.assertContains(response, "Invalid form")
def test_login_wrong_email(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "[email protected]",
'password': 'secret'},
follow=True)
self.assertContains(response, "Invalid email/password")
class ViewsTestLogged(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.client.login(email='[email protected]', password='secret')
def test_main(self):
with self.assertTemplateUsed('main.html'):
response = self.client.get(reverse('pyday_social_network:main'))
self.assertEqual(response.status_code, 200)
def test_logout(self):
with self.assertTemplateUsed('error.html'):
response = self.client.get(reverse('pyday_social_network:logout'))
self.assertEqual(response.status_code, 200)
def test_display_all_users(self):
self.display('pyday_social_network:all_users')
def display(self, name_url):
with self.assertTemplateUsed('all_users.html'):
response = self.client.get(reverse(name_url))
self.assertEqual(response.status_code, 200)
def test_display_following(self):
self.display('pyday_social_network:following')
def test_display_followers(self):
self.display('pyday_social_network:followers')
def test_display_friends(self):
self.display('pyday_social_network:friends')
def test_follow(self):
user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
response = self.client.get(
'/social/follow/{}'.format(user2.id), follow=True)
self.assertTrue(PyDayUser.objects.get(email=user2.email)
in PyDayUser.objects.get(email=self.user.email).following)
self.assertTrue(PyDayUser.objects.get(email=self.user.email)
in PyDayUser.objects.get(email=user2.email).followers)
self.assertEqual(response.redirect_chain,
[('/social/profile', 302), ('/social/profile/', 301)])
def test_follow_already_following(self):
user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.user.follow(user2.id)
with self.assertTemplateUsed('error.html'):
response = self.client.get('/social/follow/{}'.format(user2.id))
self.assertContains(
response, "You have already followed this user")
def test_unfollow(self):
user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
self.user.follow(user2.id)
response = self.client.get(
'/social/unfollow/{}'.format(user2.id), follow=True)
self.assertTrue(PyDayUser.objects.get(email=user2.email)
not in PyDayUser.objects.get(email=self.user.email).following)
self.assertTrue(PyDayUser.objects.get(email=self.user.email)
not in PyDayUser.objects.get(email=user2.email).followers)
self.assertEqual(response.redirect_chain,
[('/social/profile', 302), ('/social/profile/', 301)])
def test_unfollow_already_not_following(self):
user2 = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
with self.assertTemplateUsed('error.html'):
response = self.client.get('/social/unfollow/{}'.format(user2.id))
self.assertContains(response, "You do not follow this user")
def test_display_profile(self):
with self.assertTemplateUsed('profile.html'):
response = self.client.get(
"/social/profile/{}".format(self.user.id))
self.assertEqual(response.status_code, 200)
def test_display_non_existing(self):
with self.assertTemplateUsed('error.html'):
response = self.client.get("/social/profile/100")
self.assertContains(response, 'User does not exist.')
class ServicesTest(TestCase):
    def test_register_user_post(self):
rf = RequestFactory()
post_request = rf.post('', {'email': "[email protected]",
'password': "secret",
'first_name': "MynameisWhat",
'last_name': "MynameisWho"})
self.assertTrue(register_user_post(post_request, RegisterUserForm))
self.assertEqual(len(PyDayUser.objects.all()), 1)
user = PyDayUser.objects.get(id=1)
self.assertEqual(user.email, "[email protected]")
self.assertNotEqual(user.password, "secret")
self.assertEqual(user.first_name, "MynameisWhat")
self.assertEqual(user.last_name, "MynameisWho")
def test_current_events(self):
date_time = datetime.now()
user = PyDayUser.objects._create_user("[email protected]", "secret",
"MynameisWhat",
"MynameisWho")
event = Event(owner=user, from_time=date_time.hour,
to_time=date_time.hour + 1,
importance="important", caption="",
date=date_time, title="title")
event.save()
self.assertEqual(get_current_events(date_time.hour, date_time, user),
[event])
def test_get_greeting(self):
self.assertEqual(get_greeting(9), GREETINGS[0][2])
self.assertEqual(get_greeting(12), GREETINGS[1][2])
self.assertEqual(get_greeting(16), GREETINGS[2][2])
self.assertEqual(get_greeting(21), GREETINGS[3][2])
'''class FormTests(TestCase):
def test_form_upload_picture(self):
picture = open('./media/pictures/profile.jpg', 'rb')
file_dict = {'file': SimpleUploadedFile(picture.name, picture.read())}
form = UploadPictureForm(file_dict)
self.assertTrue(form.is_valid())
'''
|
|
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
DropOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
rect = lambda x:TT.maximum(0., x)
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
    # declare the dimensionalities of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
    #### Stitching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
## BEGIN Exercise: DOT-RNN
### Neural Implementation of the Operators: \lhd
#### Exercise (1)
#### Hidden state -> Intermediate Layer
emb_state = MultiLayer(
rng,
n_in=eval(state['nhids'])[-1],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn=eval(state['dout_init']),
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
learn_bias = True,
bias_scale=eval(state['dout_bias']),
name='emb_state')
#### Exercise (1)
#### Input -> Intermediate Layer
emb_words_out = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
rank_n_approx=state['dout_rank_n_approx'],
learn_bias = False,
bias_scale=eval(state['dout_bias']),
name='emb_words_out')
#### Hidden State: Combine emb_state and emb_words_out
#### Exercise (1)
outhid_activ = UnaryOp(activation=eval(state['dout_activ']))
#### Exercise (2)
outhid_dropout = DropOp(dropout=state['dropout'], rng=rng)
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['dout_nhid']),
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
additional_inputs = [rec_layer, shortcut(x)]
else:
additional_inputs = [rec_layer]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid)
train_model = output_layer(outhid,
no_noise_bias=state['no_noise_bias'],
additional_inputs=additional_inputs).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid, use_noise=False)
if state['shortcut_inpout']:
additional_inputs=[rec_layer, shortcut(x, use_noise=False)]
else:
additional_inputs=[rec_layer]
valid_model = output_layer(outhid,
additional_inputs=additional_inputs,
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.out,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
outhid = outhid_dropout(outhid_activ(emb_state(h0, use_noise=False, one_step=True) +
emb_words_out(word_tm1, use_noise=False, one_step=True), one_step=True),
use_noise=False, one_step=True)
word = output_layer.get_sample(state_below=outhid, additional_inputs=[h0], temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### build a Theano function for sampling
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
## Run!
main.main()
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
    # Flag (no need to change it): what to do if the cost becomes NaN. You
    # could raise instead, but leaving it at 'warn' is usually fine.
    # DATA
    # For Wikipedia the validation set is extremely large, which wastes a lot
    # of time. This value is only used for the validation set and should
    # arguably be something like seqlen * 10000 (i.e. validation restricted to
    # roughly 10000 steps).
state['reset'] = -1
    # Note on sequence length: for music / word-level data 50 is a good value;
    # for character-level data it should be at least 100 (otherwise reaching
    # state-of-the-art results is difficult). Most people use 200.
    # The job stops when the learning rate declines to this value. This can be
    # useful because sometimes it is hopeless to wait for the validation error
    # to drop below minerr, or for the time limit to expire.
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[200]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
state['dout_nhid'] = '200'
state['dout_activ'] = '"TT.nnet.sigmoid"'
state['dout_sparse']= 20
state['dout_scale'] = 1.
state['dout_bias'] = '[0]'
state['dout_init'] = "'sample_weights'"
state['dout_rank_n_approx'] = 0
state['dropout'] = .5
# HidLayer
# Hidden units on for the internal layers of DT-RNN. Having a single
# value results in a standard RNN
state['nhids'] = '[100, 100]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
    # Make this a decently large value; otherwise you waste a lot of memory
    # keeping track of the training error (and other things) at every step,
    # and the stdout becomes extremely large.
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
    # The threshold should be about 1.004 for perplexity; for entropy (which is
    # what everything returns) it should be much smaller. The running value is
    # 1.0002. We should not hyperoptimize this.
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
from telemetry.core import util
from telemetry.core.chrome import browser_backend
from telemetry.core.chrome import cros_util
class CrOSBrowserBackend(browser_backend.BrowserBackend):
# Some developers' workflow includes running the Chrome process from
# /usr/local/... instead of the default location. We have to check for both
# paths in order to support this workflow.
CHROME_PATHS = ['/opt/google/chrome/chrome ',
'/usr/local/opt/google/chrome/chrome ']
def __init__(self, browser_type, options, cri, is_guest):
super(CrOSBrowserBackend, self).__init__(is_content_shell=False,
supports_extensions=True, options=options)
# Initialize fields so that an explosion during init doesn't break in Close.
self._browser_type = browser_type
self._options = options
self._cri = cri
    self._is_guest = is_guest
    self._forwarder = None
self._remote_debugging_port = self._cri.GetRemotePort()
self._port = self._remote_debugging_port
self._login_ext_dir = os.path.join(os.path.dirname(__file__),
'chromeos_login_ext')
if not cri.local:
# Push a dummy login extension to the device.
# This extension automatically logs in as [email protected]
logging.info('Copying dummy login extension to the device')
cri.PushFile(self._login_ext_dir, '/tmp/')
self._login_ext_dir = '/tmp/chromeos_login_ext'
cri.RunCmdOnDevice(['chown', '-R', 'chronos:chronos',
self._login_ext_dir])
# Copy local extensions to temp directories on the device.
for e in options.extensions_to_load:
output = cri.RunCmdOnDevice(['mktemp', '-d', '/tmp/extension_XXXXX'])
extension_dir = output[0].rstrip()
cri.PushFile(e.path, extension_dir)
cri.RunCmdOnDevice(['chown', '-R', 'chronos:chronos', extension_dir])
e.local_path = os.path.join(extension_dir, os.path.basename(e.path))
# Ensure the UI is running and logged out.
self._RestartUI()
util.WaitFor(lambda: self.IsBrowserRunning(), 20) # pylint: disable=W0108
# Delete [email protected]'s cryptohome vault (user data directory).
if not options.dont_override_profile:
logging.info('Deleting user\'s cryptohome vault (the user data dir)')
self._cri.RunCmdOnDevice(
['cryptohome', '--action=remove', '--force', '[email protected]'])
if options.profile_dir:
profile_dir = '/home/chronos/Default'
cri.GetCmdOutput(['rm', '-rf', profile_dir])
cri.PushFile(options.profile_dir + '/Default', profile_dir)
cri.GetCmdOutput(['chown', '-R', 'chronos:chronos', profile_dir])
# Escape all commas in the startup arguments we pass to Chrome
# because dbus-send delimits array elements by commas
startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]
# Restart Chrome with the login extension and remote debugging.
logging.info('Restarting Chrome with flags and login')
args = ['dbus-send', '--system', '--type=method_call',
'--dest=org.chromium.SessionManager',
'/org/chromium/SessionManager',
'org.chromium.SessionManagerInterface.EnableChromeTesting',
'boolean:true',
'array:string:"%s"' % ','.join(startup_args)]
cri.RunCmdOnDevice(args)
if not cri.local:
# Find a free local port.
self._port = util.GetAvailableLocalPort()
# Forward the remote debugging port.
logging.info('Forwarding remote debugging port')
self._forwarder = SSHForwarder(
cri, 'L',
util.PortPair(self._port, self._remote_debugging_port))
# Wait for the browser to come up.
logging.info('Waiting for browser to be ready')
try:
self._WaitForBrowserToComeUp()
self._PostBrowserStartupInitialization()
except:
import traceback
traceback.print_exc()
self.Close()
raise
if self._is_guest:
cros_util.NavigateGuestLogin(self, cri)
# Guest browsing shuts down the current browser and launches an incognito
# browser, which we need to wait for.
self._WaitForBrowserToComeUp()
else:
cros_util.NavigateLogin(self)
logging.info('Browser is up!')
def GetBrowserStartupArgs(self):
self.webpagereplay_remote_http_port = self._cri.GetRemotePort()
self.webpagereplay_remote_https_port = self._cri.GetRemotePort()
args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()
args.extend([
'--enable-smooth-scrolling',
'--enable-threaded-compositing',
'--enable-per-tile-painting',
'--force-compositing-mode',
'--login-screen=login',
'--remote-debugging-port=%i' % self._remote_debugging_port,
'--start-maximized'])
if not self._is_guest:
args.append('--auth-ext-path=%s' % self._login_ext_dir)
return args
@property
def pid(self):
for pid, process in self._cri.ListProcesses():
for path in self.CHROME_PATHS:
if process.startswith(path):
return int(pid)
return None
def GetRemotePort(self, _):
return self._cri.GetRemotePort()
def __del__(self):
self.Close()
def Close(self):
super(CrOSBrowserBackend, self).Close()
self._RestartUI() # Logs out.
if not self._cri.local:
if self._forwarder:
self._forwarder.Close()
self._forwarder = None
if self._login_ext_dir:
self._cri.RmRF(self._login_ext_dir)
self._login_ext_dir = None
for e in self._options.extensions_to_load:
self._cri.RmRF(os.path.dirname(e.local_path))
self._cri = None
def IsBrowserRunning(self):
return bool(self.pid)
def GetStandardOutput(self):
return 'Cannot get standard output on CrOS'
def CreateForwarder(self, *port_pairs):
assert self._cri
return (browser_backend.DoNothingForwarder(*port_pairs) if self._cri.local
else SSHForwarder(self._cri, 'R', *port_pairs))
def _RestartUI(self):
if self._cri:
logging.info('(Re)starting the ui (logs the user out)')
if self._cri.IsServiceRunning('ui'):
self._cri.RunCmdOnDevice(['restart', 'ui'])
else:
self._cri.RunCmdOnDevice(['start', 'ui'])
class SSHForwarder(object):
def __init__(self, cri, forwarding_flag, *port_pairs):
self._proc = None
if forwarding_flag == 'R':
self._host_port = port_pairs[0].remote_port
command_line = ['-%s%i:localhost:%i' % (forwarding_flag,
port_pair.remote_port,
port_pair.local_port)
for port_pair in port_pairs]
else:
self._host_port = port_pairs[0].local_port
command_line = ['-%s%i:localhost:%i' % (forwarding_flag,
port_pair.local_port,
port_pair.remote_port)
for port_pair in port_pairs]
self._device_port = port_pairs[0].remote_port
self._proc = subprocess.Popen(
cri.FormSSHCommandLine(['sleep', '999999999'], command_line),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False)
util.WaitFor(lambda: cri.IsHTTPServerRunningOnPort(self._device_port), 60)
@property
def url(self):
assert self._proc
return 'http://localhost:%i' % self._host_port
def Close(self):
if self._proc:
self._proc.kill()
self._proc = None
|
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regexp
except ImportError:
# for Py 2.6
def assert_raises_regexp(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).match(error_message):
raise AssertionError("Error message should match pattern "
"'%s'. '%s' does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
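# Usage sketch for assert_raises_regexp (illustrative only): with the fallback
# above, the pattern must match the beginning of the raised message, e.g.
#     assert_raises_regexp(ValueError, "invalid literal", int, "not-a-number")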
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
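# A minimal usage sketch (not part of the original module): ``assert_warns``
# calls the function with the given arguments, checks that a warning of the
# requested category was emitted, and returns the function's result. The
# helper names below are made up for illustration and are never called here.
def _example_assert_warns_usage():
    def _noisy_add(a, b):
        warnings.warn("adding is deprecated", UserWarning)
        return a + b
    result = assert_warns(UserWarning, _noisy_add, 1, 2)
    assert_equal(result, 3)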
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
# substring will match, the entire message with typo won't
msg = w[0].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if not check_in_message(msg):
raise AssertionError("The message received ('%s') for <%s> is "
"not the one you expected ('%s')"
% (msg, func.__name__, message
))
return result
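# A minimal usage sketch (assumption, not part of the original module): the
# ``message`` argument of ``assert_warns_message`` may be a substring of the
# warning text or a callable predicate on it. The helper below is made up for
# illustration and is never called here.
def _example_assert_warns_message_usage():
    def _noisy():
        warnings.warn("feature X is deprecated", DeprecationWarning)
        return 42
    assert_warns_message(DeprecationWarning, "deprecated", _noisy)
    assert_warns_message(DeprecationWarning,
                         lambda msg: msg.startswith("feature"), _noisy)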
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
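# A minimal usage sketch (not part of the original module): checking both the
# exception type and a substring of its message. The failing helper below is
# made up for illustration and is never called here.
def _example_assert_raise_message_usage():
    def _fail(x):
        raise ValueError("x must be positive, got %r" % x)
    assert_raise_message(ValueError, "must be positive", _fail, -1)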
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
    Note: this function transposes all arrays, while fetch_mldata only
    transposes 'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
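# A minimal usage sketch (assumption, not part of the original module): faking
# an mldata dataset for a test and restoring the real ``urlopen`` afterwards.
# The dataset name, column names and shapes below are made up for illustration,
# and the helper is never called here.
def _example_mldata_mock_usage():
    fake_iris = {'data': np.random.rand(150, 4),
                 'label': np.arange(150) % 3}
    install_mldata_mock({'iris': (fake_iris, ['label', 'data'])})
    try:
        pass  # code under test would call sklearn.datasets.fetch_mldata('iris') here
    finally:
        uninstall_mldata_mock()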
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures']
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None, include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
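# A minimal usage sketch (not part of the original module): enumerating the
# default-constructible classifiers discovered by ``all_estimators`` and
# seeding their ``random_state``. The helper is illustrative and never called.
def _example_all_estimators_usage():
    for name, Estimator in all_estimators(type_filter='classifier'):
        est = Estimator()
        set_random_state(est, random_state=0)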
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
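# A minimal usage sketch (assumption, not part of the original module): a
# function wrapped with ``if_matplotlib`` raises SkipTest when matplotlib
# cannot be imported or used; otherwise it runs normally. It is never called.
@if_matplotlib
def _example_matplotlib_sketch():
    import matplotlib.pyplot as plt
    plt.plot([0, 1], [1, 0])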
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
|
# coding=utf-8
#
# Copyright 2010 Brave Labs sp. z o.o.
# All rights reserved.
#
# This source code and all resulting intermediate files are CONFIDENTIAL and
# PROPRIETY TRADE SECRETS of Brave Labs sp. z o.o.
# Use is subject to license terms. See NOTICE file of this project for details.
import logging
import sys
from google.appengine.ext.ndb.query import Cursor
from google.appengine.ext import ndb
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.views.generic.base import View, TemplateResponseMixin
from django.views.generic.edit import ProcessFormView, FormMixin
from ext.utils.textutils import clear_string
from plebe.views.generic.flat import get_or_404
class AuthMixin(object):
def auth(self, request, *args, **kwargs):
if request.user.is_anonymous():
logging.debug("Anonymous redirecting from to login from: %s" % request.get_full_path)
url = "?next=".join((reverse("login"),request.get_full_path()))
return HttpResponseRedirect(url)
class AuthGetMixin(AuthMixin):
def get(self, request, *args, **kwargs):
return self.auth(request, *args, **kwargs) or super(AuthGetMixin, self).get(request, *args, **kwargs)
class AuthPostMixin(AuthMixin):
def post(self, request, *args, **kwargs):
return self.auth(request, *args, **kwargs) or super(AuthPostMixin, self).post(request, *args, **kwargs)
class ListView(View, TemplateResponseMixin):
model = None
query = None
page_size = 20
def get(self, request, *args, **kwargs):
query = self.get_query()
context = self.get_context(query=query)
response = self.render_to_response(context)
        if 'paginator' in context:
response['X-Cursor'] = context['paginator']['cursor']
response['X-Offset'] = context['paginator']['offset']
return response
def get_template_names(self):
query = self.get_query()
template_name = self.template_name
if not template_name:
# model = ndb.Model._kind_map.get(query.kind)
# app_name, model = model.__module__.split('.',1)[0], query.kind
# template_name = "/".join((app_name, model))
template_name = query.kind
if not template_name.endswith(".html"):
use_rows_template = self.request.is_ajax()
if use_rows_template:
template_name += "/rows.html"
else:
template_name += "/list.html"
return template_name
def get_query(self):
"""
        Get the list of items for this view. This must be an iterable, and may
        be a queryset (in which case queryset-specific behavior will be enabled).
"""
if not self.query:
if not self.model:
raise ImproperlyConfigured(u"'%s' must define or 'query' or 'model'" % self.__class__.__name__)
self.query = self.model.query()
order = self.request.GET.get('order', None)
if order:
if order.startswith("-"):
self.query = self.query.order(-self.model._properties[order[1:]])
else:
self.query = self.query.order(self.model._properties[order])
return self.query
def get_context(self, **kwargs):
"""
Get the context for this view.
"""
query = kwargs.pop('query')
if not self.page_size:
context = {'list': query}
else:
cursor = self.request.GET.get('cursor')
offset = self.request.GET.get('offset')
if cursor:
cursor = Cursor(urlsafe=cursor)
            # TODO: 05.06.12 wooyek: namedtuple seems to be empty in the template, investigate
# paginator = namedtuple('paginator', 'list cursor more')
list, cursor, more = query.fetch_page(self.page_size, start_cursor=cursor)
if cursor:
cursor = cursor.urlsafe()
offset = int(offset) + self.page_size if offset else 1
context = {
'list': list,
'paginator': {'cursor': cursor, 'more': more, 'offset': offset},
}
context.update(kwargs)
return context
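# A minimal usage sketch (assumption, not part of the original module): a
# caller of a ListView subclass can page through results by echoing the
# X-Cursor/X-Offset response headers back as query parameters. The URL and
# client object below are made up for illustration; the helper is never called.
def _example_follow_next_page(client, url="/Item/"):
    first = client.get(url)
    cursor = first['X-Cursor']  # urlsafe cursor set in ListView.get()
    return client.get(url, {'cursor': cursor, 'offset': first['X-Offset']})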
class AuthListView(AuthGetMixin, ListView):
pass
class SingleObjectMixin(object):
""" Support for single model get by 'key' or 'id' keyword argument to the view """
model = None
key = None
def get_key(self):
if self.key:
return self.key
key = self.kwargs.get('key')
logging.debug("key: %s" % key)
id = self.kwargs.get('id')
if key:
self.key = ndb.Key(urlsafe=key)
elif id:
self.key = ndb.Key(self.model._get_kind(), int(id))
else:
self.key = self.request.key
logging.debug("self.key: %s" % self.key)
return self.key
@cached_property
def object(self):
key = self.get_key()
return get_or_404(key)
class GetMixin(object):
def get(self, request, *args, **kwargs):
context = self.get_context_data(object=self.object)
assert context, "DetailView needs context data to render properly"
response = self.render_to_response(context)
key = self.get_key()
response['X-KeyId'] = key.id()
response['X-Key'] = key.urlsafe()
return response
def get_context_data(self, **kwargs):
return kwargs
class DetailView(SingleObjectMixin, GetMixin, TemplateResponseMixin, View):
def get_template_names(self):
if self.template_name:
return self.template_name
# app_name, model = self.model.__module__.split('.',1)[0], self.model._get_kind()
# return "%s/%s_detail.html" % (app_name, model)
return self.model._get_kind() + "/detail.html"
class AuthDetailView(AuthGetMixin, DetailView):
pass
class FormNextMixin(View):
"""
    Redirects to a form with the current HTTP_REFERER info to be used as a success_url.
This mixin needs to be before FormMixin and ProcessFormView as they override the same methods.
"""
next = None
get_next = True
def get_success_url(self):
url = clear_string(self.request.GET.get('next',None))
if not url and self.success_url:
url = self.success_url % self.object.__dict__
if not url:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured("No URL to redirect to. Either provide a url or define a get_absolute_url method on the Model.")
return url
def get(self, request, *args, **kwargs):
if self.get_next:
self.next = clear_string(request.GET.get('next',None))
# leave alone POST and ajax requests
if request.method == "GET" and not request.is_ajax():
referrer = request.META.get('HTTP_REFERER', None)
if self.next is None and referrer is not None:
url = request.get_full_path()
if url.find("?") < 0:
url = "?next=".join((url, referrer))
else:
url = "&next=".join((url, referrer))
return HttpResponseRedirect(url)
return super(FormNextMixin, self).get(request, *args, **kwargs)
class FormView(TemplateResponseMixin, FormNextMixin, FormMixin, ProcessFormView):
pass
class ModelFormView(SingleObjectMixin, FormView):
success_url = None
form_class = None
def get_initial(self):
initial = super(ModelFormView, self).get_initial()
logging.debug("initial: %s" % initial)
initial.update(self.request.GET.items())
logging.debug("initial: %s" % initial)
return initial
def get_form_class(self):
if self.form_class:
return self.form_class
forms_module = self.model.__module__.split('.', -1)[0] + ".forms"
form_class = self.model._get_kind() + "Form"
try:
__import__(forms_module)
return getattr(sys.modules[forms_module], form_class)
except ImportError as ex:
logging.warn(ex, exc_info=True)
msg = "{0} could not import a default forms module '{1}'. Provide a form class or put '{2}' it in the default forms module."
msg = msg.format(self.__class__.__name__, forms_module, form_class)
raise ImproperlyConfigured(msg)
except AttributeError as ex:
logging.warn(ex, exc_info=True)
msg = "{0} could not find a default form '{2}' in '{1}' module. Provide a form class or implement a default one."
msg = msg.format(self.__class__.__name__, forms_module, form_class)
raise ImproperlyConfigured(msg)
def get_form_kwargs(self):
kwargs = super(ModelFormView, self).get_form_kwargs()
kwargs["model"] = self.model
kwargs["instance"] = self.object
return kwargs
def get_template_names(self):
if self.template_name:
return self.template_name
return "%s/form.html" % self.model._get_kind()
def get_context_data(self, **kwargs):
context = kwargs
context.update({
"object" : self.object,
'next': self.next
})
return context
def form_valid(self, form):
        # The form should decide whether to create a new model instance or update the given one
self.object = form.save()
return super(ModelFormView, self).form_valid(form)
def get(self, request, *args, **kwargs):
response = super(ModelFormView, self).get(request, *args, **kwargs)
obj = self.object
if obj:
key = obj.key
response['X-KeyId'] = key.id()
response['X-Key'] = key.urlsafe()
return response
def post(self, request, *args, **kwargs):
response = super(ModelFormView, self).post(request, *args, **kwargs)
obj = self.object
if obj:
key = obj.key
response['X-KeyId'] = key.id()
response['X-Key'] = key.urlsafe()
return response
class AuthModelFormView(AuthGetMixin, AuthPostMixin, ModelFormView):
pass
class DeleteView(SingleObjectMixin, GetMixin, TemplateResponseMixin, FormNextMixin):
success_url = None
def delete(self, request, *args, **kwargs):
self.get_key().delete()
if request.is_ajax():
return HttpResponse("Deleted")
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
def get_template_names(self):
if self.template_name:
return self.template_name
# return "%s/delete.html" % self.model._get_kind()
return "delete.html"
class AuthDeleteView(AuthGetMixin, AuthPostMixin, DeleteView):
pass
class CrudView(object):
"""A helper class to build CRUD urls based on generic views"""
model = None
form_class = None
delete_next = None
list_query = None
list_view_class = ListView
detail_view_class = DetailView
update_view_class = ModelFormView
create_view_class = ModelFormView
delete_view_class = DeleteView
urls_type = "id"
def __init__(self, model, **kwargs):
self.model = model
# this way subclasses won't have to implement their own constructor
# just to save arguments in properties
for key, value in kwargs.iteritems():
setattr(self, key, value)
def list_view(self, **initkwargs):
kwargs = {
'model': self.model,
'list_query': self.list_query,
}
kwargs.update(initkwargs)
kwargs['query'] = kwargs.pop('list_query')
return self.list_view_class.as_view(**kwargs)
def create_view(self, **initkwargs):
kwargs = {
'model': self.model,
'get_next': False,
}
kwargs.update(initkwargs)
return self.create_view_class.as_view(**kwargs)
def update_view(self, **initkwargs):
kwargs = {
'model': self.model,
'form_class': self.form_class,
}
kwargs.update(initkwargs)
return self.update_view_class.as_view(**kwargs)
def detail_view(self, **initkwargs):
kwargs = {
'model': self.model,
}
kwargs.update(initkwargs)
return self.detail_view_class.as_view(**kwargs)
def delete_view(self, **initkwargs):
kwargs = {
'model': self.model,
'success_url': self.delete_next,
}
kwargs.update(initkwargs)
return self.delete_view_class.as_view(**kwargs)
def urls(self):
from django.conf.urls import patterns, url
kind = self.model._get_kind()
prefix = "^" + kind
if self.urls_type == "key":
return patterns('',
url(prefix+"/$", self.list_view(), name=kind+"_list"),
url(prefix+"/create$", self.create_view(), name=kind+"_create"),
url(prefix+"/(?P<key>[-\w]+)/$", self.detail_view(), name=kind+"_detail"),
url(prefix+"/(?P<key>[-\w]+)/update$", self.update_view(), name=kind+"_update"),
url(prefix+"/(?P<key>[-\w]+)/delete$", self.delete_view(success_url=prefix+"/$"), name=kind+"_delete"),
)
return patterns('',
url(prefix+"/$", self.list_view(), name=kind+"_list"),
url(prefix+"/create$", self.create_view(), name=kind+"_create"),
url(prefix+"/(?P<id>\d+)/$", self.detail_view(), name=kind+"_detail"),
url(prefix+"/(?P<id>\d+)/update$", self.update_view(), name=kind+"_update"),
url(prefix+"/(?P<id>\d+)/delete$", self.delete_view(success_url=prefix+"/$"), name=kind+"_delete"),
)
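# A minimal usage sketch (assumption, not part of the original project): wiring
# CrudView into a Django urls.py for a hypothetical ``Project`` ndb model.
# ``myapp.models`` and ``ProjectForm`` are made-up names used only to show the
# intended call pattern.
#
#     from myapp.models import Project
#     from myapp.forms import ProjectForm
#
#     crud = CrudView(Project, form_class=ProjectForm)
#     urlpatterns = crud.urls()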
class AuthCrudView(CrudView):
list_view_class = AuthListView
detail_view_class = AuthDetailView
update_view_class = AuthModelFormView
create_view_class = AuthModelFormView
delete_view_class = AuthDeleteView
class Put2PostMixin(object):
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class RpcCreateView(SingleObjectMixin, View):
model = None
def post(self, request, *args, **kwargs):
obj = self.create_object(request.POST)
obj.put()
return HttpResponse(obj.key.urlsafe())
def create_object(self, properties):
properties = dict([(k, properties[k]) for k in properties.keys()])
parent = properties.pop("parent", None)
if parent:
parent = ndb.Key(urlsafe=parent)
return self.model(parent=parent, **properties)
def validate(self, property, value):
return value
class RpcSetView(SingleObjectMixin, View):
def post(self, request, *args, **kwargs):
self.update_properties(request.POST)
self.object.put()
return HttpResponse(self.object.key.urlsafe())
def update_properties(self, properties):
property = properties.get('property', None)
if property:
value = properties.get('value')
self.validate(property, value)
else:
for property, value in properties.items():
setattr(self.object, property, value)
def validate(self, property, value):
return value
|
|
"""This file contains tests for individual management commands
These commands are run via `./manage.py command`."""
from datetime import date, datetime, time
from io import StringIO
import unittest
from unittest.mock import MagicMock
from django.core.management import call_command
from django.test import TestCase
import requests_mock
from .base import TestBase
from ..management.commands.fake_database import (
Command as FakeDatabaseCommand,
Faker
)
from ..management.commands.instructors_activity import (
Command as InstructorsActivityCommand)
from ..management.commands.check_for_workshop_websites_updates import (
Command as WebsiteUpdatesCommand,
WrongWorkshopURL,
datetime_match,
datetime_decode)
from ..models import (
Airport,
Role,
Badge,
Tag,
Person,
Organization,
Event,
Task,
)
class TestInstructorsActivityCommand(TestBase):
def setUp(self):
self.cmd = InstructorsActivityCommand()
# add instructors
self._setUpLessons()
self._setUpBadges()
self._setUpAirports()
self._setUpInstructors()
# and some non-instructors
self._setUpNonInstructors()
# add one event that some instructors took part in
self._setUpOrganizations()
self.event = Event.objects.create(
slug='event-with-tasks',
host=self.org_alpha,
start=date(2015, 8, 30),
)
self._setUpRoles()
self.instructor = Role.objects.get(name='instructor')
self.helper = Role.objects.get(name='helper')
self.learner = Role.objects.get(name='learner')
Task.objects.bulk_create([
Task(event=self.event, person=self.hermione, role=self.instructor),
Task(event=self.event, person=self.ron, role=self.instructor),
Task(event=self.event, person=self.ron, role=self.helper),
Task(event=self.event, person=self.harry, role=self.helper),
Task(event=self.event, person=self.spiderman, role=self.learner),
Task(event=self.event, person=self.blackwidow, role=self.learner),
])
def test_getting_foreign_tasks(self):
"""Make sure we get tasks for other people (per event)."""
person = self.hermione
roles = [self.instructor, self.helper]
tasks = person.task_set.filter(role__in=roles)
# index 0, because Hermione has only one task and we're checking it
fg_tasks = self.cmd.foreign_tasks(tasks, person, roles)[0]
# we should receive other instructors and helpers for self.event
expecting = set([
Task.objects.get(event=self.event, person=self.ron,
role=self.instructor),
Task.objects.get(event=self.event, person=self.ron,
role=self.helper),
Task.objects.get(event=self.event, person=self.harry,
role=self.helper),
])
self.assertEqual(expecting, set(fg_tasks))
def test_fetching_activity(self):
"""Make sure we get correct results for all instructors."""
# include people who don't want to be contacted (other option is tested
# in `self.test_fetching_activity_may_contact_only`)
results = self.cmd.fetch_activity(may_contact_only=False)
instructor_badges = Badge.objects.instructor_badges()
persons = [d['person'] for d in results]
lessons = [list(d['lessons']) for d in results]
instructor_awards = [list(d['instructor_awards']) for d in results]
tasks = [d['tasks'] for d in results]
expecting_persons = [self.hermione, self.harry, self.ron]
expecting_lessons = [list(self.hermione.lessons.all()),
list(self.harry.lessons.all()),
list(self.ron.lessons.all())]
expecting_awards = [
list(person.award_set.filter(badge__in=instructor_badges))
for person in expecting_persons
]
self.assertEqual(set(persons), set(expecting_persons))
self.assertEqual(lessons, expecting_lessons)
self.assertEqual(instructor_awards, expecting_awards)
for task in tasks:
for own_task, foreign_tasks in task:
# we don't test foreign tasks, since they should be tested in
# `self.test_getting_foreign_tasks`
self.assertIn(
own_task,
own_task.person.task_set.filter(
role__name__in=['instructor', 'helper']
)
)
def test_fetching_activity_may_contact_only(self):
"""Make sure we get results only for people we can send emails to."""
# let's make Harry willing to receive emails
self.hermione.may_contact = False
self.harry.may_contact = True
self.ron.may_contact = False
self.hermione.save()
self.harry.save()
self.ron.save()
results = self.cmd.fetch_activity(may_contact_only=True)
persons = [d['person'] for d in results]
expecting_persons = [self.harry]
self.assertEqual(set(persons), set(expecting_persons))
class TestWebsiteUpdatesCommand(TestBase):
maxDiff = None
def setUp(self):
self.cmd = WebsiteUpdatesCommand()
self.fake_cmd = FakeDatabaseCommand()
self.seed = 12345
self.fake_cmd.faker.seed(self.seed)
self.fake_cmd.stdout = StringIO()
self.fake_cmd.fake_organizations()
self.mocked_event_page = """
<html><head>
<meta name="slug" content="2015-07-13-test" />
<meta name="startdate" content="2015-07-13" />
<meta name="enddate" content="2015-07-14" />
<meta name="country" content="us" />
<meta name="venue" content="Euphoric State University" />
<meta name="address" content="Highway to Heaven 42, Academipolis" />
<meta name="latlng" content="36.998977, -109.045173" />
<meta name="language" content="us" />
<meta name="invalid" content="invalid" />
<meta name="instructor" content="Hermione Granger|Ron Weasley" />
<meta name="helper" content="Peter Parker|Tony Stark|Natasha Romanova" />
<meta name="contact" content="[email protected], [email protected]" />
<meta name="eventbrite" content="10000000" />
<meta name="charset" content="utf-8" />
</head>
<body>
<h1>test</h1>
</body></html>
"""
self.expected_metadata_parsed = {
'slug': '2015-07-13-test',
'language': 'US',
'start': date(2015, 7, 13),
'end': date(2015, 7, 14),
'country': 'US',
'venue': 'Euphoric State University',
'address': 'Highway to Heaven 42, Academipolis',
'latitude': 36.998977,
'longitude': -109.045173,
'reg_key': 10000000,
'instructors': ['Hermione Granger', 'Ron Weasley'],
'helpers': ['Peter Parker', 'Tony Stark', 'Natasha Romanova'],
'contact': '[email protected], [email protected]',
}
self.date_serialization_tests = [
# simple tests
('', ''),
('empty string', 'empty string'),
# format-matching
('2016-04-18', date(2016, 4, 18)),
('2016-04-18T16:41:30', datetime(2016, 4, 18, 16, 41, 30)),
('2016-04-18T16:41:30.123',
datetime(2016, 4, 18, 16, 41, 30, 123000)),
('16:41:30', time(16, 41, 30)),
('16:41:30.123', time(16, 41, 30, 123000)),
# format not matching (ie. timezone-aware)
('2016-04-18T16:41:30+02:00', '2016-04-18T16:41:30+02:00'),
('2016-04-18T14:41:30Z', '2016-04-18T14:41:30Z'),
('16:41:30+02:00', '16:41:30+02:00'),
('14:41:30Z', '14:41:30Z'),
]
def test_getting_events(self):
"""Ensure only active events with URL are returned."""
self.fake_cmd.fake_current_events(count=6, add_tags=False)
Event.objects.all().update(start=date.today())
# one active event with URL and one without
e1, e2 = Event.objects.all()[0:2]
e1.completed = False # completed == !active
e1.url = 'https://swcarpentry.github.io/workshop-template/'
e1.save()
e2.completed = False
e2.url = None
e2.save()
# one inactive event with URL and one without
e3, e4 = Event.objects.all()[2:4]
e3.completed = True
e3.url = 'https://datacarpentry.github.io/workshop-template/'
e3.save()
e4.completed = True
e4.url = None
e4.save()
# both active but one very old
e5, e6 = Event.objects.all()[4:6]
e5.completed = False
e5.url = 'https://swcarpentry.github.io/workshop-template2/'
e5.start = date(2014, 1, 1)
e5.save()
e6.completed = False
e6.url = 'https://datacarpentry.github.io/workshop-template2/'
e6.save()
# check
events = set(self.cmd.get_events())
self.assertEqual({e1, e6}, events)
def test_parsing_github_url(self):
"""Ensure `parse_github_url()` correctly parses repository URL."""
url = 'https://github.com/swcarpentry/workshop-template'
expected = 'swcarpentry', 'workshop-template'
self.assertEqual(expected, self.cmd.parse_github_url(url))
with self.assertRaises(WrongWorkshopURL):
url = 'https://swcarpentry.github.io/workshop-template'
self.cmd.parse_github_url(url)
@requests_mock.Mocker()
def test_getting_event_metadata(self, mock):
"""Ensure metadata are fetched and normalized by `get_event_metadata`."""
# underlying `fetch_event_metadata` and `parse_metadata_from_event_website`
# are tested in great detail in `test_util.py`, so here's just a short
# test
website_url = 'https://github.com/swcarpentry/workshop-template'
mock_text = self.mocked_event_page
mock.get(website_url, text=mock_text, status_code=200)
# mock placed, let's test `get_event_metadata`
metadata = self.cmd.get_event_metadata(website_url)
self.assertEqual(metadata, self.expected_metadata_parsed)
def test_deserialization_of_string(self):
"Ensure our datetime matching function works correctly for strings."
for test, expected in self.date_serialization_tests:
with self.subTest(test=test):
self.assertEqual(datetime_match(test), expected)
def test_deserialization_of_list(self):
"""Ensure our datetime matching function works correctly for lists."""
tests = self.date_serialization_tests[:]
tests = list(zip(*tests)) # transpose
test = list(tests[0])
expected = list(tests[1])
self.assertEqual(datetime_decode(test), expected)
def test_deserialization_of_dict(self):
"""Ensure our datetime matching function works correctly for dicts."""
test = {k: k for k, v in self.date_serialization_tests}
expected = {k: v for k, v in self.date_serialization_tests}
self.assertEqual(datetime_decode(test), expected)
def test_deserialization_of_nested(self):
"""Ensure our datetime matching function works correctly for nested
objects/lists."""
# this test uses simpler format
dict_test = {'2016-04-18': '2016-04-18'}
dict_expected = {'2016-04-18': date(2016, 4, 18)}
test = [dict_test.copy(), dict_test.copy(), dict_test.copy()]
expected = [dict_expected.copy(), dict_expected.copy(),
dict_expected.copy()]
self.assertEqual(datetime_decode(test), expected)
test = {'1': test[:]}
expected = {'1': expected[:]}
self.assertEqual(datetime_decode(test), expected)
def test_serialization(self):
"""Ensure serialization uses JSON and works correctly with dates,
datetimes and times.
        Ensure deserialization from JSON works correctly with dates,
datetimes and times."""
serialized_json = self.cmd.serialize(self.expected_metadata_parsed)
self.assertIn('2015-07-13', serialized_json)
self.assertIn('2015-07-14', serialized_json)
self.assertIn('2015-07-13-test', serialized_json)
self.assertIn('-109.045173', serialized_json)
self.assertIn('36.998977', serialized_json)
self.assertIn('[email protected], [email protected]',
serialized_json)
deserialized_data = self.cmd.deserialize(serialized_json)
self.assertEqual(deserialized_data, self.expected_metadata_parsed)
@unittest.skip('Don\'t know how to test it')
def test_loading_from_github(self):
"""Not sure how to test it, so for now leaving this blank."""
# TODO: mock up github response?
pass
@requests_mock.Mocker()
def test_detecting_changes(self, mock):
"""Make sure metadata changes are detected."""
hash_ = 'abcdefghijklmnopqrstuvwxyz'
e = Event.objects.create(
slug='with-changes', host=Organization.objects.first(),
url='https://swcarpentry.github.io/workshop-template/',
repository_last_commit_hash=hash_,
repository_metadata='',
metadata_changed=False)
branch = MagicMock()
branch.commit = MagicMock()
branch.commit.sha = hash_
changes = self.cmd.detect_changes(branch, e)
self.assertEqual(changes, [])
# more real example: hash changed
hash_ = "zyxwvutsrqponmlkjihgfedcba"
branch.commit.sha = hash_
mock_text = self.mocked_event_page
mock.get(e.url, text=mock_text, status_code=200)
metadata = self.cmd.empty_metadata()
metadata['instructors'] = self.expected_metadata_parsed['instructors']
metadata['latitude'] = self.expected_metadata_parsed['latitude']
metadata['longitude'] = self.expected_metadata_parsed['longitude']
e.repository_metadata = self.cmd.serialize(metadata)
e.save()
changes = self.cmd.detect_changes(branch, e)
expected = [
'Helpers changed',
'Start date changed',
'End date changed',
'Country changed',
'Venue changed',
'Address changed',
'Contact details changed',
'Eventbrite key changed',
]
self.assertEqual(changes, expected)
@requests_mock.Mocker()
def test_initialization(self, mock):
"""Make sure events are initialized to sane values."""
e = Event.objects.create(
slug='with-changes', host=Organization.objects.first(),
url='https://swcarpentry.github.io/workshop-template/',
repository_last_commit_hash='', repository_metadata='',
metadata_changed=False, metadata_all_changes='')
hash_ = 'abcdefghijklmnopqrstuvwxyz'
branch = MagicMock()
branch.commit = MagicMock()
branch.commit.sha = hash_
mock_text = self.mocked_event_page
mock.get(e.url, text=mock_text, status_code=200)
self.cmd.init(branch, e)
e.refresh_from_db()
# metadata updated
self.assertEqual(e.repository_last_commit_hash, hash_)
self.assertEqual(self.cmd.deserialize(e.repository_metadata),
self.expected_metadata_parsed)
self.assertEqual(e.metadata_all_changes, '')
self.assertEqual(e.metadata_changed, False)
@unittest.skip('This command requires internet connection')
def test_running(self):
"""Test running whole command."""
call_command('check_for_workshop_websites_updates')
|
|
# coding: utf-8
import os
import time
import csv
import tempfile
from django.urls import reverse
from django.core.files.storage import get_storage_class, FileSystemStorage
from django.utils import timezone
from xlrd import open_workbook
from onadata.apps.viewer.models.export import Export
from onadata.apps.viewer.views import kml_export, export_download
from onadata.libs.utils.export_tools import generate_export
from onadata.libs.utils.user_auth import http_auth_string
from .test_base import TestBase
class TestFormExports(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form_and_submit_instance()
self.csv_url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string})
self.xls_url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string})
def _num_rows(self, content, export_format):
def xls_rows(f):
return open_workbook(file_contents=f).sheets()[0].nrows
def csv_rows(f):
return len([line for line in csv.reader(f.decode().strip().split('\n'))])
num_rows_fn = {
'xls': xls_rows,
'csv': csv_rows,
}
return num_rows_fn[export_format](content)
def test_csv_raw_export_name(self):
response = self.client.get(self.csv_url + '?raw=1')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'], 'attachment;')
def _filter_export_test(self, url, export_format):
"""
        Test filter exports. Use sleep so that consecutive submissions fall in different seconds.
Number of rows equals number of surveys plus 1, the header row.
"""
time.sleep(1)
# 1 survey exists before this time
start_time = timezone.now().strftime('%y_%m_%d_%H_%M_%S')
time.sleep(1)
s = self.surveys[1]
self._make_submission(
os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
time.sleep(1)
# 2 surveys exist before this time
end_time = timezone.now().strftime('%y_%m_%d_%H_%M_%S')
time.sleep(1)
# 3 surveys exist in total
s = self.surveys[2]
self._make_submission(
os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
# test restricting to before end time
params = {'end': end_time}
response = self.client.get(url, params)
self.assertEqual(response.status_code, 200)
content = self._get_response_content(response)
self.assertEqual(self._num_rows(content, export_format), 3)
# test restricting to after start time, thus excluding the initial
# submission
params = {'start': start_time}
response = self.client.get(url, params)
self.assertEqual(response.status_code, 200)
content = self._get_response_content(response)
self.assertEqual(self._num_rows(content, export_format), 3)
# test no time restriction
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = self._get_response_content(response)
self.assertEqual(self._num_rows(content, export_format), 4)
# test restricting to between start time and end time
params = {'start': start_time, 'end': end_time}
response = self.client.get(url, params)
self.assertEqual(response.status_code, 200)
content = self._get_response_content(response)
self.assertEqual(self._num_rows(content, export_format), 2)
def test_filter_by_date_csv(self):
self._filter_export_test(self.csv_url, 'csv')
def test_filter_by_date_xls(self):
self._filter_export_test(self.xls_url, 'xls')
def test_restrict_csv_export_if_not_shared(self):
response = self.anon.get(self.csv_url)
self.assertEqual(response.status_code, 403)
def test_xls_raw_export_name(self):
response = self.client.get(self.xls_url + '?raw=1')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'], 'attachment;')
def test_restrict_xls_export_if_not_shared(self):
response = self.anon.get(self.xls_url)
self.assertEqual(response.status_code, 403)
def test_restrict_kml_export_if_not_shared(self):
url = reverse(kml_export, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.anon.get(url)
self.assertEqual(response.status_code, 403)
def test_allow_csv_export_if_shared(self):
self.xform.shared_data = True
self.xform.save()
response = self.anon.get(self.csv_url)
self.assertEqual(response.status_code, 200)
def test_allow_xls_export_if_shared(self):
self.xform.shared_data = True
self.xform.save()
response = self.anon.get(self.xls_url)
self.assertEqual(response.status_code, 200)
def test_allow_kml_export_if_shared(self):
self.xform.shared_data = True
self.xform.save()
url = reverse(kml_export, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.anon.get(url)
self.assertEqual(response.status_code, 200)
def test_allow_csv_export(self):
response = self.client.get(self.csv_url)
self.assertEqual(response.status_code, 200)
def test_allow_xls_export(self):
response = self.client.get(self.xls_url)
self.assertEqual(response.status_code, 200)
def test_allow_kml_export(self):
url = reverse(kml_export, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_allow_csv_export_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION': http_auth_string(self.login_username,
self.login_password)
}
response = self.anon.get(self.csv_url, **extra)
self.assertEqual(response.status_code, 200)
def test_allow_xls_export_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION': http_auth_string(self.login_username,
self.login_password)
}
response = self.anon.get(self.xls_url, **extra)
self.assertEqual(response.status_code, 200)
def test_allow_kml_export_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION': http_auth_string(self.login_username,
self.login_password)
}
url = reverse(kml_export, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.anon.get(url, **extra)
self.assertEqual(response.status_code, 200)
def test_allow_export_download_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION': http_auth_string(self.login_username,
self.login_password)
}
# create export
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
self.assertTrue(isinstance(export, Export))
url = reverse(export_download, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': export.export_type,
'filename': export.filename
})
response = self.anon.get(url, **extra)
default_storage = get_storage_class()()
if not isinstance(default_storage, FileSystemStorage):
self.assertEqual(response.status_code, 302)
else:
self.assertEqual(response.status_code, 200)
|
|
"""The tests for the InfluxDB component."""
import unittest
from unittest import mock
import influxdb as influx_client
from homeassistant.setup import setup_component
import homeassistant.components.influxdb as influxdb
from homeassistant.const import EVENT_STATE_CHANGED, STATE_OFF, STATE_ON
from tests.common import get_test_home_assistant
@mock.patch('influxdb.InfluxDBClient')
class TestInfluxDB(unittest.TestCase):
"""Test the InfluxDB component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.handler_method = None
self.hass.bus.listen = mock.Mock()
def tearDown(self):
"""Clear data."""
self.hass.stop()
def test_setup_config_full(self, mock_client):
"""Test the setup with full configuration."""
config = {
'influxdb': {
'host': 'host',
'port': 123,
'database': 'db',
'username': 'user',
'password': 'password',
'ssl': 'False',
'verify_ssl': 'False',
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(
EVENT_STATE_CHANGED, self.hass.bus.listen.call_args_list[0][0][0])
self.assertTrue(mock_client.return_value.query.called)
def test_setup_config_defaults(self, mock_client):
"""Test the setup with default configuration."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(
EVENT_STATE_CHANGED, self.hass.bus.listen.call_args_list[0][0][0])
def test_setup_minimal_config(self, mock_client):
"""Test the setup with minimal configuration."""
config = {
'influxdb': {}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
def test_setup_missing_password(self, mock_client):
"""Test the setup with existing username and missing password."""
config = {
'influxdb': {
'username': 'user'
}
}
assert not setup_component(self.hass, influxdb.DOMAIN, config)
def test_setup_query_fail(self, mock_client):
"""Test the setup for query failures."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
}
}
mock_client.return_value.query.side_effect = \
influx_client.exceptions.InfluxDBClientError('fake')
assert not setup_component(self.hass, influxdb.DOMAIN, config)
def _setup(self):
"""Setup the client."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'exclude': {
'entities': ['fake.blacklisted'],
'domains': ['another_fake']
}
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
def test_event_listener(self, mock_client):
"""Test the event listener."""
self._setup()
valid = {
'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo'
}
for in_, out in valid.items():
attrs = {
'unit_of_measurement': 'foobars',
'longitude': '1.1',
'latitude': '2.2'
}
state = mock.MagicMock(
state=in_, domain='fake', object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
if isinstance(out, str):
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'state': out,
'longitude': 1.1,
'latitude': 2.2
},
}]
else:
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': out,
'longitude': 1.1,
'latitude': 2.2
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_no_units(self, mock_client):
"""Test the event listener for missing units."""
self._setup()
for unit in (None, ''):
if unit:
attrs = {'unit_of_measurement': unit}
else:
attrs = {}
state = mock.MagicMock(
state=1, domain='fake', entity_id='entity-id',
object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'entity-id',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_fail_write(self, mock_client):
"""Test the event listener for write failures."""
self._setup()
state = mock.MagicMock(
state=1, domain='fake', entity_id='entity-id', object_id='entity',
attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
mock_client.return_value.write_points.side_effect = \
influx_client.exceptions.InfluxDBClientError('foo')
self.handler_method(event)
def test_event_listener_states(self, mock_client):
"""Test the event listener against ignored states."""
self._setup()
for state_state in (1, 'unknown', '', 'unavailable'):
state = mock.MagicMock(
state=state_state, domain='fake', entity_id='entity-id',
object_id='entity', attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'entity-id',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if state_state == 1:
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_blacklist(self, mock_client):
"""Test the event listener against a blacklist."""
self._setup()
for entity_id in ('ok', 'blacklisted'):
state = mock.MagicMock(
state=1, domain='fake', entity_id='fake.{}'.format(entity_id),
object_id=entity_id, attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'fake.{}'.format(entity_id),
'tags': {
'domain': 'fake',
'entity_id': entity_id,
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if entity_id == 'ok':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_blacklist_domain(self, mock_client):
"""Test the event listener against a blacklist."""
self._setup()
for domain in ('ok', 'another_fake'):
state = mock.MagicMock(
state=1, domain=domain,
entity_id='{}.something'.format(domain),
object_id='something', attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': '{}.something'.format(domain),
'tags': {
'domain': domain,
'entity_id': 'something',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if domain == 'ok':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_whitelist(self, mock_client):
"""Test the event listener against a whitelist."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'include': {
'entities': ['fake.included'],
}
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
for entity_id in ('included', 'default'):
state = mock.MagicMock(
state=1, domain='fake', entity_id='fake.{}'.format(entity_id),
object_id=entity_id, attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'fake.{}'.format(entity_id),
'tags': {
'domain': 'fake',
'entity_id': entity_id,
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if entity_id == 'included':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_whitelist_domain(self, mock_client):
"""Test the event listener against a whitelist."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'include': {
'domains': ['fake'],
}
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
for domain in ('fake', 'another_fake'):
state = mock.MagicMock(
state=1, domain=domain,
entity_id='{}.something'.format(domain),
object_id='something', attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': '{}.something'.format(domain),
'tags': {
'domain': domain,
'entity_id': 'something',
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if domain == 'fake':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_invalid_type(self, mock_client):
"""Test the event listener when an attirbute has an invalid type."""
self._setup()
valid = {
'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo'
}
for in_, out in valid.items():
attrs = {
'unit_of_measurement': 'foobars',
'longitude': '1.1',
'latitude': '2.2',
'invalid_attribute': ['value1', 'value2']
}
state = mock.MagicMock(
state=in_, domain='fake', object_id='entity', attributes=attrs)
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
if isinstance(out, str):
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'state': out,
'longitude': 1.1,
'latitude': 2.2,
'invalid_attribute_str': "['value1', 'value2']"
},
}]
else:
body = [{
'measurement': 'foobars',
'tags': {
'domain': 'fake',
'entity_id': 'entity',
},
'time': 12345,
'fields': {
'value': float(out),
'longitude': 1.1,
'latitude': 2.2,
'invalid_attribute_str': "['value1', 'value2']"
},
}]
self.handler_method(event)
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
mock_client.return_value.write_points.reset_mock()
def test_event_listener_default_measurement(self, mock_client):
"""Test the event listener with a default measurement."""
config = {
'influxdb': {
'host': 'host',
'username': 'user',
'password': 'pass',
'default_measurement': 'state',
'exclude': {
'entities': ['fake.blacklisted']
}
}
}
assert setup_component(self.hass, influxdb.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
for entity_id in ('ok', 'blacklisted'):
state = mock.MagicMock(
state=1, domain='fake', entity_id='fake.{}'.format(entity_id),
object_id=entity_id, attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'measurement': 'state',
'tags': {
'domain': 'fake',
'entity_id': entity_id,
},
'time': 12345,
'fields': {
'value': 1,
},
}]
self.handler_method(event)
if entity_id == 'ok':
self.assertEqual(
mock_client.return_value.write_points.call_count, 1
)
self.assertEqual(
mock_client.return_value.write_points.call_args,
mock.call(body)
)
else:
self.assertFalse(mock_client.return_value.write_points.called)
mock_client.return_value.write_points.reset_mock()
|
|
#!/usr/bin/env python
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
"""
from __future__ import division, print_function
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version >= 3.5 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 1
MINOR = 18
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
if not GIT_REVISION:
# this shouldn't happen but apparently can (see gh-8512)
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
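# For reference, a minimal sketch (an assumption, modelled on the corresponding
# check in numpy/__init__.py) of how the flag set above is consumed at import
# time; this helper is illustrative only and is never called by this script.
def _numpy_setup_flag_sketch():
    if getattr(builtins, '__NUMPY_SETUP__', False):
        # numpy is being imported by its own setup machinery, so the compiled
        # extension modules may not be built yet and must not be loaded.
        return 'running from numpy source directory'
    return 'normal import'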
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='numpy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('numpy')
config.add_data_files(('numpy', 'LICENSE.txt'))
config.get_version('numpy/version.py') # sets config.version
return config
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule %s missing' % p)
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: %s' % line)
class concat_license_files():
"""Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
gh-13447). This makes GitHub state correctly how NumPy is licensed.
"""
def __init__(self):
self.f1 = 'LICENSE.txt'
self.f2 = 'LICENSES_bundled.txt'
def __enter__(self):
"""Concatenate files and remove LICENSES_bundled.txt"""
with open(self.f1, 'r') as f1:
self.bsd_text = f1.read()
with open(self.f1, 'a') as f1:
with open(self.f2, 'r') as f2:
self.bundled_text = f2.read()
f1.write('\n\n')
f1.write(self.bundled_text)
def __exit__(self, exception_type, exception_value, traceback):
"""Restore content of both files"""
with open(self.f1, 'w') as f:
f.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
with concat_license_files():
sdist.run(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
for d in ('random',):
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'numpy/{0}'.format(d)],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
    Return a boolean value for whether or not to run the build (avoid
parsing Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install numpy` (last NumPy release on PyPi)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
NumPy-specific help
-------------------
To install NumPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest NumPy release
from PyPi, use `pip install numpy`.
For help with build/installation issues, please ask on the
numpy-discussion mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/numpy/numpy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed numpy)
- `>>> numpy.test()` (run tests for installed numpy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
            with `twine upload -s <filenames>`.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
build_sphinx="""
`setup.py build_sphinx` is not supported, use the
Makefile under doc/""",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
import warnings
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates", stacklevel=2)
return True
def setup_package():
src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
    # Rewrite the version file every time
write_version_py()
# The f2py scripts that will be installed
if sys.platform == 'win32':
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
]
else:
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
metadata = dict(
name = 'numpy',
maintainer = "NumPy Developers",
maintainer_email = "[email protected]",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
"Documentation": "https://docs.scipy.org/doc/numpy/",
"Source Code": "https://github.com/numpy/numpy",
},
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
cmdclass={"sdist": sdist_checked,
},
python_requires='>=3.5',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
},
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
from setuptools import setup
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
        if 'sdist' not in sys.argv:
# Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
else:
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info()[0]
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
# This may avoid problems where numpy is installed via ``*_requires`` by
# setuptools, the global namespace isn't reset properly, and then numpy is
# imported later (which will then fail to load numpy extension modules).
# See gh-7956 for details
del builtins.__NUMPY_SETUP__
|
|
# Copyright (c) 2011 Gennadiy Shafranovich
# Licensed under the MIT license
# see LICENSE file for copying permission.
import json
from datetime import date
from datetime import datetime
from datetime import timedelta
from urllib import quote_plus
from urllib import urlopen
from django.db import models
#-------------\
# CONSTANTS |
#-------------/
CASH_SYMBOL = '*CASH'
PRICE_HISTORY_LIMIT_IN_DAYS = 365 * 10
#----------\
# MODELS |
#----------/
class Quote(models.Model):
symbol = models.CharField(max_length = 10, unique = True)
name = models.CharField(max_length = 255)
price = models.FloatField()
last_trade = models.DateTimeField()
cash_equivalent = models.BooleanField()
class Meta:
db_table = 'quote'
def __unicode__(self):
return '%s - %s' % (self.symbol, self.name)
class PriceHistory(models.Model):
quote = models.ForeignKey(Quote)
as_of_date = models.DateTimeField()
price = models.FloatField()
class Meta:
db_table = 'price_history'
unique_together = ( 'quote', 'as_of_date' )
def __unicode__(self):
return '%s @ %.2f on %s' % (self.quote.symbol, self.price, self.as_of_date.strftime('%Y-%m-%d'))
#------------\
# SERVICES |
#------------/
def price_as_of(quote, as_of):
"""Get the price for quote as of a specific date."""
if quote.cash_equivalent or quote.last_trade.date() == as_of:
return quote.price
else:
candidates = quote.pricehistory_set.filter(as_of_date__lte = as_of.strftime('%Y-%m-%d')).order_by('-as_of_date')[0:1]
return (candidates[0].price if candidates.count() > 0 else 0)
def previous_close_price(quote):
"""Get the previous close price for a quote."""
return price_as_of(quote, quote.last_trade.date() - timedelta(days = 1))
def quote_by_symbol(symbol):
"""Retrieve a quote by symbol."""
return quotes_by_symbols([ symbol ])[0]
def quotes_by_symbols(symbols, force_retrieve = False):
"""Retrieve a quotes by a list of symbols."""
# load or prime quotes for each symbol
existing_quotes = dict([ (q.symbol, q) for q in Quote.objects.filter(symbol__in = symbols) ])
quotes = { }
symbols_to_retrieve = []
for symbol in symbols:
quote = existing_quotes.get(symbol, None)
exists = True
if quote == None:
quote = Quote(symbol = symbol, last_trade = datetime.now())
exists = False
quotes[symbol] = quote
if symbol == CASH_SYMBOL and not exists:
quote.name = 'US Dollars'
quote.price = 1.0
quote.cash_equivalent = True
quote.changed = True
elif not exists or force_retrieve:
quote.price = 0.0
quote.changed = True
symbols_to_retrieve.append(symbol)
else:
quote.changed = False
# retrieve fresh prices from yahoo
if len(symbols_to_retrieve) > 0:
csv_url = ('http://download.finance.yahoo.com/d/quotes.csv?s=%s&f=sl1d1t1n&e=.csv' % (','.join(symbols_to_retrieve)))
csv_columns = 'symbol,price,date,time,name'
for row in _yql_csv_to_json(csv_url, csv_columns):
price = row['price']
tradeDate = row['date']
tradeTime = row['time']
quote = quotes.get(row['symbol'])
quote.cash_equivalent = price.endswith('%')
quote.price = (1.0 if quote.cash_equivalent else float(price))
quote.name = row['name']
if tradeDate != 'N/A' and tradeTime != 'N/A':
month, day, year = [int(f) for f in tradeDate.split('/')]
time = datetime.strptime(tradeTime, '%I:%M%p')
quote.last_trade = datetime(year, month, day, time.hour, time.minute, time.second)
# save all changes
for quote in quotes.values():
if quote.changed:
quote.save()
if not quote.cash_equivalent and quote.price > 0.0 and quote.pricehistory_set.count() == 0:
refresh_price_history(quote)
return quotes.values()
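# A minimal usage sketch of the service functions above. It is illustrative
# only and never called by this module; it assumes a configured Django ORM,
# reachable Yahoo endpoints, and uses placeholder ticker symbols.
def _example_refresh_quotes(symbols = ('AAPL', 'GOOG', CASH_SYMBOL)):
    for quote in quotes_by_symbols(list(symbols), force_retrieve = True):
        print '%s (%s): %.2f as of %s' % (quote.symbol, quote.name, quote.price, quote.last_trade)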
def refresh_price_history(quote):
start_date = quote.last_trade + timedelta(days = 0 - PRICE_HISTORY_LIMIT_IN_DAYS)
end_date = quote.last_trade + timedelta(days = 1)
csv_columns = 'date,open,high,low,close,volume,adj_close'
csv_url = ('http://ichart.finance.yahoo.com/table.csv?s=%s&a=%.2d&b=%.2d&c=%.4d&d=%.2d&e=%.2d&f=%.4d&g=d&ignore=.csv' % (
quote.symbol,
(start_date.month - 1),
start_date.day,
start_date.year,
(end_date.month - 1),
end_date.day,
end_date.year)
)
to_remove = { }
for history in quote.pricehistory_set.filter(as_of_date__gte = start_date.date()):
to_remove[history.as_of_date.date()] = history
to_save = []
for row in _yql_csv_to_json(csv_url, csv_columns, PRICE_HISTORY_LIMIT_IN_DAYS, 2):
as_of_date = datetime.strptime(row['date'], '%Y-%m-%d')
price = float(row['adj_close'])
history = to_remove.get(as_of_date.date())
if history == None:
history = PriceHistory()
history.quote = quote
history.as_of_date = as_of_date
history.price = price
else:
del(to_remove[as_of_date.date()])
if abs(history.price - price) > 0.0001:
history.price = price
else:
continue
to_save.append(history)
if len(to_remove) > 0:
ids = [ h.id for h in to_remove.values() ]
PriceHistory.objects.filter(id__in = ids).delete()
if len(to_save) > 0:
for history in to_save:
history.save()
quote.history_date = datetime.now()
quote.save()
return quote
#-------------------\
# LOCAL FUNCTIONS |
#-------------------/
def _yql_csv_to_json(csv_url, csv_columns, limit = None, offset = None):
u = None
try:
yql_suffix = ''
if limit != None and offset != None:
yql_suffix = yql_suffix + (' limit %d offset %d' % (limit, offset))
yql_query = ("select * from csv where url='%s' and columns='%s' %s" % (csv_url, csv_columns, yql_suffix))
u = urlopen('http://query.yahooapis.com/v1/public/yql?q=%s&format=json&callback=' % quote_plus(yql_query))
packet = json.loads(u.read())
out = [ ]
        if 'query' in packet:
count = packet['query']['count']
if count == 1:
out.append(packet['query']['results']['row'])
elif count > 0:
out = packet['query']['results']['row']
return out
finally:
if u != None:
u.close()
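# Illustrative sketch (the example symbol and values are assumptions) of the
# YQL statement that _yql_csv_to_json builds for a single-symbol quote request,
# before URL-encoding; never called by this module.
def _example_yql_statement(symbol = 'IBM'):
    csv_url = 'http://download.finance.yahoo.com/d/quotes.csv?s=%s&f=sl1d1t1n&e=.csv' % symbol
    csv_columns = 'symbol,price,date,time,name'
    return "select * from csv where url='%s' and columns='%s' " % (csv_url, csv_columns)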
|
|
# =============================================================================
# pre_migrate.py - plugin for preparing for migrating classic XR to eXR/fleXR
#
# Copyright (c) 2013, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import csv
import os
import re
import subprocess
import pexpect
from csmpe.plugins import CSMPlugin
from csmpe.core_plugins.csm_install_operations.utils import ServerType, is_empty, concatenate_dirs
from simple_server_helper import TFTPServer, FTPServer, SFTPServer
from hardware_audit import Plugin as HardwareAuditPlugin
from migration_lib import log_and_post_status
from csmpe.core_plugins.csm_get_inventory.ios_xr.plugin import get_package, get_inventory
MINIMUM_RELEASE_VERSION_FOR_MIGRATION = "6.1.3"
NOX_FOR_MAC = "nox-mac64.bin"
NOX_64_BINARY = "nox-linux-64.bin"
TIMEOUT_FOR_COPY_CONFIG = 3600
TIMEOUT_FOR_COPY_IMAGE = 1800
TIMEOUT_FOR_FPD_UPGRADE = 9600
IMAGE_LOCATION = "harddisk:/"
CONFIG_LOCATION = "harddiskb:/"
XR_CONFIG_IN_CSM = "xr.cfg"
ADMIN_CONFIG_IN_CSM = "admin.cfg"
CONVERTED_XR_CONFIG_IN_CSM = "xr.iox"
CONVERTED_ADMIN_CAL_CONFIG_IN_CSM = "admin.cal"
CONVERTED_ADMIN_XR_CONFIG_IN_CSM = "admin.iox"
FINAL_CAL_CONFIG = "cXR_admin_plane_converted_eXR.cfg"
FINAL_XR_CONFIG = "cXR_xr_plane_converted_eXR.cfg"
# XR_CONFIG_ON_DEVICE = "iosxr.cfg"
# ADMIN_CAL_CONFIG_ON_DEVICE = "admin_calvados.cfg"
# ADMIN_XR_CONFIG_ON_DEVICE = "admin_iosxr.cfg"
class Plugin(CSMPlugin):
"""
A plugin for preparing device for migration from
ASR9K IOS-XR (a.k.a. XR) to ASR9K IOS-XR 64 bit (a.k.a. eXR)
This plugin does the following:
1. Check several pre-requisites
2. Resize the eUSB partition(/harddiskb:/ on XR)
3. Migrate the configurations with NoX and upload them to device
4. Copy the eXR image to /harddiskb:/
5. Upgrade some FPD's if needed.
Console access is needed.
"""
name = "Pre-Migrate Plugin"
platforms = {'ASR9K'}
phases = {'Pre-Migrate'}
os = {'XR'}
node_pattern = re.compile("^\d+(/\w+)+$")
def _save_show_platform(self):
"""Save the output of 'show platform' to session log"""
cmd = "show platform"
# show platform can take more than 1 minute after router reload. Issue No. 47
output = self.ctx.send(cmd, timeout=600)
file_name = self.ctx.save_to_file(cmd, output)
if file_name is None:
self.ctx.warning("Unable to save '{}' output to file: {}".format(cmd, file_name))
def _ping_repository_check(self, repo_url):
"""Test ping server repository ip from device"""
repo_ip = re.search("[/@](\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/", repo_url)
if not repo_ip:
self.ctx.error("Bad hostname for server repository. Please check the settings in CSM.")
output = self.ctx.send("ping {}".format(repo_ip.group(1)))
if "100 percent" not in output:
self.ctx.error("Failed to ping server repository {} on device." +
"Please check session.log.".format(repo_ip.group(1)))
def _all_configs_supported(self, nox_output):
"""Check text output from running NoX on system. Only return True if all configs are supported by eXR."""
pattern = "Filename[\sA-Za-z\n]*[-\s]*\S*\s+\d*\s+\d*\(\s*\d*%\)\s+\d*\(\s*\d*%\)\s+\d*\(\s*\d*%\)\s+(\d*)"
match = re.search(pattern, nox_output)
if match:
if match.group(1) != "0":
return False
return True
def _upload_files_to_server_repository(self, sourcefiles, server, destfilenames):
"""
Upload files from their locations in the host linux system to the FTP/TFTP/SFTP server repository.
Arguments:
:param sourcefiles: a list of string file paths that each points to a file on the system where CSM is hosted.
The paths are all relative to csm/csmserver/.
For example, if the source file is in csm_data/migration/filename,
sourcefiles = ["../../csm_data/migration/filename"]
:param server: the associated server repository object stored in CSM database
:param destfilenames: a list of string filenames that the source files should be named after being copied to
the designated directory in the server repository. i.e., ["thenewfilename"]
:return: True if no error occurred.
"""
server_type = server.server_type
selected_server_directory = self.ctx._csm.install_job.server_directory
if server_type == ServerType.TFTP_SERVER:
tftp_server = TFTPServer(server)
for x in range(0, len(sourcefiles)):
log_and_post_status(self.ctx, "Copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
try:
tftp_server.upload_file(sourcefiles[x], destfilenames[x],
sub_directory=selected_server_directory)
except:
self.ctx.error("Exception was thrown while " +
"copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
elif server_type == ServerType.FTP_SERVER:
ftp_server = FTPServer(server)
for x in range(0, len(sourcefiles)):
log_and_post_status(self.ctx, "Copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
try:
ftp_server.upload_file(sourcefiles[x], destfilenames[x],
sub_directory=selected_server_directory)
except:
self.ctx.error("Exception was thrown while " +
"copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
elif server_type == ServerType.SFTP_SERVER:
sftp_server = SFTPServer(server)
for x in range(0, len(sourcefiles)):
log_and_post_status(self.ctx, "Copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
try:
sftp_server.upload_file(sourcefiles[x], destfilenames[x],
sub_directory=selected_server_directory)
except:
self.ctx.error("Exception was thrown while " +
"copying file {} to {}/{}/{}.".format(sourcefiles[x],
server.server_directory,
selected_server_directory,
destfilenames[x]))
else:
self.ctx.error("Pre-Migrate does not support {} server repository.".format(server_type))
return True
def _copy_files_to_device(self, server, repository, source_filenames, dest_files, timeout=600):
"""
Copy files from their locations in the user selected server directory in the FTP/TFTP/SFTP server repository
to locations on device.
Arguments:
:param server: the server object fetched from database
:param repository: the string url link that points to the location of files in the SFTP server repository
:param source_filenames: a list of string filenames in the designated directory in the server repository.
:param dest_files: a list of string file paths that each points to a file to be created on device.
i.e., ["harddiskb:/asr9k-mini-x64.tar"]
:param timeout: the timeout for the sftp copy operation on device. The default is 10 minutes.
:return: None if no error occurred.
"""
if server.server_type == ServerType.FTP_SERVER or server.server_type == ServerType.TFTP_SERVER:
self._copy_files_from_ftp_tftp_to_device(repository, source_filenames, dest_files, timeout=timeout)
elif server.server_type == ServerType.SFTP_SERVER:
self._copy_files_from_sftp_to_device(server, source_filenames, dest_files, timeout=timeout)
else:
self.ctx.error("Pre-Migrate does not support {} server repository.".format(server.server_type))
def _copy_files_from_ftp_tftp_to_device(self, repository, source_filenames, dest_files, timeout=600):
"""
Copy files from their locations in the user selected server directory in the FTP or TFTP server repository
to locations on device.
Arguments:
:param repository: the string url link that points to the location of files in the FTP/TFTP server repository,
with no extra '/' in the end. i.e., tftp://223.255.254.245/tftpboot
:param source_filenames: a list of string filenames in the designated directory in the server repository.
:param dest_files: a list of string file paths that each points to a file to be created on device.
i.e., ["harddiskb:/asr9k-mini-x64.tar"]
:param timeout: the timeout for the 'copy' CLI command on device. The default is 10 minutes.
:return: None if no error occurred.
"""
def send_newline(ctx):
ctx.ctrl.sendline()
return True
def error(ctx):
ctx.message = "Error copying file."
return False
for x in range(0, len(source_filenames)):
command = "copy {}/{} {}".format(repository, source_filenames[x], dest_files[x])
CONFIRM_FILENAME = re.compile("Destination filename.*\?")
CONFIRM_OVERWRITE = re.compile("Copy : Destination exists, overwrite \?\[confirm\]")
COPIED = re.compile(".+bytes copied in.+ sec")
COPYING = re.compile("C" * 50)
NO_SUCH_FILE = re.compile("%Error copying.*\(Error opening source file\): No such file or directory")
ERROR_COPYING = re.compile("%Error copying")
PROMPT = self.ctx.prompt
TIMEOUT = self.ctx.TIMEOUT
events = [PROMPT, CONFIRM_FILENAME, CONFIRM_OVERWRITE, COPIED, COPYING,
TIMEOUT, NO_SUCH_FILE, ERROR_COPYING]
transitions = [
(CONFIRM_FILENAME, [0], 1, send_newline, timeout),
(CONFIRM_OVERWRITE, [1], 2, send_newline, timeout),
(COPIED, [1, 2], 3, None, 20),
(COPYING, [1, 2], 2, send_newline, timeout),
(PROMPT, [3], -1, None, 0),
(TIMEOUT, [0, 1, 2, 3], -1, error, 0),
(NO_SUCH_FILE, [0, 1, 2, 3], -1, error, 0),
(ERROR_COPYING, [0, 1, 2, 3], -1, error, 0),
]
log_and_post_status(self.ctx, "Copying {}/{} to {} on device".format(repository,
source_filenames[x],
dest_files[x]))
if not self.ctx.run_fsm("Copy file from tftp/ftp to device", command, events, transitions,
timeout=20, max_transitions=40):
self.ctx.error("Error copying {}/{} to {} on device".format(repository,
source_filenames[x],
dest_files[x]))
output = self.ctx.send("dir {}".format(dest_files[x]))
if "No such file" in output:
self.ctx.error("Failed to copy {}/{} to {} on device".format(repository,
source_filenames[x],
dest_files[x]))
def _copy_files_from_sftp_to_device(self, server, source_filenames, dest_files, timeout=600):
"""
Copy files from their locations in the user selected server directory in the SFTP server repository
to locations on device.
Arguments:
:param server: the sftp server object
:param source_filenames: a list of string filenames in the designated directory in the server repository.
:param dest_files: a list of string file paths that each points to a file to be created on device.
i.e., ["harddiskb:/asr9k-mini-x64.tar"]
:param timeout: the timeout for the sftp copy operation on device. The default is 10 minutes.
:return: None if no error occurred.
"""
source_path = server.server_url
remote_directory = concatenate_dirs(server.server_directory, self.ctx._csm.install_job.server_directory)
if not is_empty(remote_directory):
source_path += ":{}".format(remote_directory)
def pause_logfile_stream(ctx):
"""
            Pause session logging during the sftp copy. For large files the byte
            counter in the output updates continuously, and logging every update
            keeps the session log busy long after the copy itself has finished.
"""
if ctx.ctrl._session.logfile_read:
ctx.ctrl._session.logfile_read = None
return True
def send_password(ctx):
ctx.ctrl.sendline(server.password)
return pause_logfile_stream(ctx)
def send_yes(ctx):
ctx.ctrl.sendline("yes")
return pause_logfile_stream(ctx)
def reinstall_logfile(ctx):
if ctx.ctrl._logfile_fd and (not ctx.ctrl._session.logfile_read):
ctx.ctrl._session.logfile_read = ctx.ctrl._logfile_fd
else:
ctx.message = "Error reinstalling session.log."
return False
return True
def timeout_error(ctx):
reinstall_logfile(ctx)
ctx.message = "Timed out while copying file from sftp."
return False
def no_such_file_error(ctx):
reinstall_logfile(ctx)
ctx.message = "Copying the file from sftp failed because it is not found in the specified path."
return False
def download_abort_error(ctx):
reinstall_logfile(ctx)
ctx.message = "Copying the file from sftp failed. Download was aborted."
return False
for x in range(0, len(source_filenames)):
if is_empty(server.vrf):
command = "sftp {}@{}/{} {}".format(server.username, source_path, source_filenames[x], dest_files[x])
else:
command = "sftp {}@{}/{} {} vrf {}".format(server.username, source_path, source_filenames[x],
dest_files[x], server.vrf)
PASSWORD = re.compile("Password:")
CONFIRM_OVERWRITE = re.compile("Overwrite.*\[yes/no\]\:")
COPIED = re.compile("bytes copied in", re.MULTILINE)
NO_SUCH_FILE = re.compile("src.*does not exist")
DOWNLOAD_ABORTED = re.compile("Download aborted.")
PROMPT = self.ctx.prompt
TIMEOUT = self.ctx.TIMEOUT
events = [PROMPT, PASSWORD, CONFIRM_OVERWRITE, COPIED, TIMEOUT, NO_SUCH_FILE, DOWNLOAD_ABORTED]
transitions = [
(PASSWORD, [0], 1, send_password, timeout),
(CONFIRM_OVERWRITE, [1], 2, send_yes, timeout),
(COPIED, [1, 2], -1, reinstall_logfile, 0),
(PROMPT, [1, 2], -1, reinstall_logfile, 0),
(TIMEOUT, [0, 1, 2], -1, timeout_error, 0),
(NO_SUCH_FILE, [0, 1, 2], -1, no_such_file_error, 0),
(DOWNLOAD_ABORTED, [0, 1, 2], -1, download_abort_error, 0),
]
log_and_post_status(self.ctx, "Copying {}/{} to {} on device".format(source_path,
source_filenames[x],
dest_files[x]))
if not self.ctx.run_fsm("Copy file from sftp to device", command, events, transitions, timeout=20):
self.ctx.error("Error copying {}/{} to {} on device".format(source_path,
source_filenames[x],
dest_files[x]))
output = self.ctx.send("dir {}".format(dest_files[x]))
if "No such file" in output:
self.ctx.error("Failed to copy {}/{} to {} on device".format(source_path,
source_filenames[x],
dest_files[x]))
def _run_migration_on_config(self, fileloc, filename, nox_to_use, hostname):
"""
Run the migration tool - NoX - on the configurations copied out from device.
The conversion/migration is successful if the number under 'Total' equals to
the number under 'Known' in the text output.
If it's successful, but not all existing configs are supported by eXR, create two
new log files for the supported and unsupported configs in session log directory.
The unsupported configs will not appear on the converted configuration files.
Log a warning about the removal of unsupported configs, but this is not considered
        an error.
If it's not successful, meaning that there are some configurations not known to
the NoX tool, in this case, create two new log files for the supported and unsupported
configs in session log directory. After that, error out.
:param fileloc: string location where the config needs to be converted/migrated is,
without the '/' in the end. This location is relative to csm/csmserver/
:param filename: string filename of the config
:param nox_to_use: string name of NoX binary executable.
:param hostname: hostname of device, as recorded on CSM.
:return: None if no error occurred.
"""
try:
commands = [subprocess.Popen(["chmod", "+x", nox_to_use]),
subprocess.Popen([nox_to_use, "-f", os.path.join(fileloc, filename)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
]
nox_output, nox_error = commands[1].communicate()
except OSError:
self.ctx.error("Failed to run the configuration migration tool {} on config file {} - OSError.".format(
nox_to_use,
os.path.join(fileloc, filename))
)
if nox_error:
self.ctx.error("Failed to run the configuration migration tool on the admin configuration " +
"we retrieved from device - {}.".format(nox_error))
if filename.split('.')[0] == 'admin':
if (not os.path.isfile(os.path.join(fileloc, CONVERTED_ADMIN_CAL_CONFIG_IN_CSM))) or \
(not os.path.isfile(os.path.join(fileloc, CONVERTED_ADMIN_XR_CONFIG_IN_CSM))):
self.ctx.error("Failed to convert the ASR9K admin configuration with NoX tool.")
elif not os.path.isfile(os.path.join(fileloc, CONVERTED_XR_CONFIG_IN_CSM)):
self.ctx.error("Failed to convert the ASR9K IOS-XR configuration with NoX tool.")
conversion_success = False
match = re.search("Filename[\sA-Za-z\n]*[-\s]*\S*\s+(\d*)\s+\d*\(\s*\d*%\)\s+\d*\(\s*\d*%\)\s+(\d*)",
nox_output)
if match:
if match.group(1) == match.group(2):
conversion_success = True
if filename == ADMIN_CONFIG_IN_CSM:
supported_log_name = "supported_config_in_admin_configuration"
unsupported_log_name = "unsupported_config_in_admin_configuration"
else:
supported_log_name = "supported_config_in_xr_configuration"
unsupported_log_name = "unsupported_config_in_xr_configuration"
if conversion_success:
if self._all_configs_supported(nox_output):
log_and_post_status(self.ctx, "Configuration {} was migrated successfully. ".format(filename) +
"No unsupported configurations found.")
else:
self._create_config_logs(os.path.join(fileloc, filename.split(".")[0] + ".csv"),
supported_log_name, unsupported_log_name,
hostname, filename)
log_and_post_status(self.ctx, "Configurations that are unsupported in eXR were removed in " +
"{}. Please look into {} and {}.".format(filename,
unsupported_log_name,
supported_log_name))
else:
self._create_config_logs(os.path.join(fileloc, filename.split(".")[0] + ".csv"),
supported_log_name, unsupported_log_name, hostname, filename)
self.ctx.error("Unknown configurations found. Please look into {} ".format(unsupported_log_name) +
"for unprocessed configurations, and {} for known/supported configurations".format(
unsupported_log_name, supported_log_name)
)
def _resize_eusb(self):
"""Resize the eUSB partition on device - Run the /pkg/bin/resize_eusb script on device(from ksh)."""
self.ctx.send("run", wait_for_string="#")
output = self.ctx.send("ksh /pkg/bin/resize_eusb", wait_for_string="#")
self.ctx.send("exit")
if "Pre-Migration Operation Completed." not in output:
self.ctx.error("Pre-Migrate partition check failed. Please check session.log.")
# output = self.ctx.send("show media")
# eusb_size = re.search("/harddiskb:.* ([.\d]+)G", output)
# if not eusb_size:
# self.ctx.error("Unexpected output from CLI 'show media'.")
# if eusb_size.group(1) < "1.0":
# self.ctx.error("/harddiskb:/ is smaller than 1 GB after running /pkg/bin/resize_eusb. " +
# "Please make sure that the device has either RP2 or RSP4.")
def _check_fpd(self, fpd_relevant_nodes):
"""
Check the versions of migration related FPD's on device. Return a dictionary
that tells which FPD's on which nodes require successful FPD upgrade later on.
:param fpd_relevant_nodes: a dictionary. Keys are strings representing all node locations
on device parsed from output of "admin show platform".
Values are integers. Value can either be 0 or 1.
value 1 means that we actually will need to make sure that the
FPD upgrade later on for this node location completes successfully,
value 0 means that we don't need to check if the
FPD upgrade later on for this node location is successful or not.
:return: a dictionary with string FPD type as key, and a set of the string names of
node locations as value.
"""
fpdtable = self.ctx.send("show hw-module fpd location all")
subtype_to_locations_need_upgrade = {}
last_location = None
for line in fpdtable.split('\n'):
first_word = line.split(' ', 1)[0]
if self.node_pattern.match(first_word):
# since fpd_relevant_nodes is loaded from db, the keys are
# unicode instead of byte strings
indicator = fpd_relevant_nodes.get(unicode(first_word, encoding="latin1"))
# indicator is 1:
# Detect a new node(RSP/RP/LC/FC) of which fpds we'll need to check
# if upgrade goes successful
# indicator is None:
# Detect node that is not found in output of "admin show platform"
# we need to check if FPD upgrade goes successful in this case
if indicator == 1 or indicator is None:
last_location = first_word
# indicator is 0:
# Detect node to be PEM/FAN or some other unsupported hardware in eXR.
# we don't care if the FPD upgrade for these is successful or not
# so we update last_location to None
else:
last_location = None
# Found some fpd that needs upgrade
if last_location and len(line) >= 79 and line[76:79] == "Yes":
fpdtype_end_idx = 51
while line[fpdtype_end_idx] != ' ':
fpdtype_end_idx += 1
fpdtype = line[51:fpdtype_end_idx]
if fpdtype not in subtype_to_locations_need_upgrade:
# it is possible to have duplicates, so using set here
subtype_to_locations_need_upgrade[fpdtype] = set()
subtype_to_locations_need_upgrade[fpdtype].add(last_location)
return subtype_to_locations_need_upgrade
def _check_if_fpd_package_installed(self):
"""
Check if the FPD package is already active on device.
Error out if not.
:return: None if FPD package is active, error out if not.
"""
active_packages = self.ctx.send("show install active summary")
match = re.search("fpd", active_packages)
if not match:
self.ctx.error("No FPD package is active on device. Please install the FPD package on device first.")
return
def _ensure_updated_fpd(self, fpd_relevant_nodes):
"""
Upgrade FPD's if needed.
Steps:
1. Check version of the migration related FPD's. Get the dictionary
of FPD types mapped to locations for which we need to check for
           upgrade success.
2. Force install the FPD types that need upgrade on all locations.
Check FPD related sys log to make sure all necessary upgrades
defined by the dictionary complete successfully.
:param fpd_relevant_nodes: a dictionary. Keys are strings representing all node locations
on device parsed from output of "admin show platform".
Values are integers. Value can either be 0 or 1.
value 1 means that we actually will need to make sure that the
FPD upgrade later on for this node location completes successfully,
value 0 means that we don't need to check if the
FPD upgrade later on for this node location is successful or not.
:return: True if no error occurred.
"""
# check for the FPD version, if FPD needs upgrade,
log_and_post_status(self.ctx, "Checking FPD versions...")
subtype_to_locations_need_upgrade = self._check_fpd(fpd_relevant_nodes)
if subtype_to_locations_need_upgrade:
# Force upgrade all FPD's in RP and Line card that need upgrade, with the FPD pie or both the FPD
# pie and FPD SMU depending on release version
self._upgrade_all_fpds(subtype_to_locations_need_upgrade)
return True
def _upgrade_all_fpds(self, subtype_to_locations_need_upgrade):
"""Force upgrade certain FPD's on all locations. Check for success. """
def send_newline(ctx):
ctx.ctrl.sendline()
return True
def send_yes(ctx):
ctx.ctrl.sendline("yes")
return True
def error(ctx):
ctx.message = "Error upgrading FPD."
return False
def timeout(ctx):
ctx.message = "Timeout upgrading FPD."
return False
for fpdtype in subtype_to_locations_need_upgrade:
log_and_post_status(self.ctx, "FPD upgrade - start to upgrade FPD {} on all locations".format(fpdtype))
CONFIRM_CONTINUE = re.compile("Continue\? \[confirm\]")
CONFIRM_SECOND_TIME = re.compile("Continue \? \[no\]:")
UPGRADE_END = re.compile("FPD upgrade has ended.")
PROMPT = self.ctx.prompt
TIMEOUT = self.ctx.TIMEOUT
events = [PROMPT, CONFIRM_CONTINUE, CONFIRM_SECOND_TIME, UPGRADE_END, TIMEOUT]
transitions = [
(CONFIRM_CONTINUE, [0], 1, send_newline, TIMEOUT_FOR_FPD_UPGRADE),
(CONFIRM_SECOND_TIME, [0, 1], 2, send_yes, TIMEOUT_FOR_FPD_UPGRADE),
(UPGRADE_END, [1, 2], 3, None, 120),
(PROMPT, [3], -1, None, 0),
(PROMPT, [1, 2], -1, error, 0),
(TIMEOUT, [0, 1, 2], -1, timeout, 0),
]
if not self.ctx.run_fsm("Upgrade FPD",
"admin upgrade hw-module fpd {} location all".format(fpdtype),
events, transitions, timeout=30):
self.ctx.error("Error while upgrading FPD subtype {}. Please check session.log".format(fpdtype))
fpd_log = self.ctx.send("show log | include fpd")
for location in subtype_to_locations_need_upgrade[fpdtype]:
pattern = "Successfully\s*(?:downgrade|upgrade)\s*{}.*location\s*{}".format(fpdtype, location)
fpd_upgrade_success = re.search(pattern, fpd_log)
if not fpd_upgrade_success:
self.ctx.error("Failed to upgrade FPD subtype {} on location {}. ".format(fpdtype, location) +
"Please check session.log.")
return True
def _create_config_logs(self, csvfile, supported_log_name, unsupported_log_name, hostname, filename):
"""
Create two logs for migrated configs that are unsupported and supported by eXR.
They are stored in the same directory as session log, for user to view.
:param csvfile: the string csv filename generated by running NoX on original config.
:param supported_log_name: the string filename for the supported configs log
:param unsupported_log_name: the string filename for the unsupported configs log
:param hostname: string hostname of device, as recorded on CSM.
:param filename: string filename of original config
:return: None if no error occurred
"""
if not os.path.isfile(os.path.join(csvfile)):
self.ctx.error("Missing the csv file {} that should have been generated by the NoX tool".format(csvfile) +
" during the configuration conversion. Failed to write diagnostic files.")
supported_config_log = os.path.join(self.ctx.log_directory, supported_log_name)
unsupported_config_log = os.path.join(self.ctx.log_directory, unsupported_log_name)
try:
with open(supported_config_log, 'w') as supp_log:
with open(unsupported_config_log, 'w') as unsupp_log:
supp_log.write('Configurations Known and Supported to the NoX Conversion Tool \n \n')
unsupp_log.write('Configurations Unprocessed by the NoX Conversion Tool (Comments, Markers,' +
' or Unknown/Unsupported Configurations) \n \n')
supp_log.write('{0[0]:<8} {0[1]:^20} \n'.format(("Line No.", "Configuration")))
unsupp_log.write('{0[0]:<8} {0[1]:^20} \n'.format(("Line No.", "Configuration")))
with open(csvfile, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if len(row) >= 3 and row[1].strip() == "KNOWN_SUPPORTED":
supp_log.write('{0[0]:<8} {0[1]:<} \n'.format((row[0], row[2])))
elif len(row) >= 3:
unsupp_log.write('{0[0]:<8} {0[1]:<} \n'.format((row[0], row[2])))
msg = "\n \nPlease find original configuration in csm_data/migration/{}/{} \n".format(hostname,
filename)
supp_log.write(msg)
unsupp_log.write(msg)
if filename.split('.')[0] == 'admin':
msg2 = "The final converted configuration is in csm_data/migration/" + \
hostname + "/" + CONVERTED_ADMIN_CAL_CONFIG_IN_CSM + \
" and csm_data/migration/" + hostname + "/" + CONVERTED_ADMIN_XR_CONFIG_IN_CSM
else:
msg2 = "The final converted configuration is in csm_data/migration/" + \
hostname + "/" + CONVERTED_XR_CONFIG_IN_CSM
supp_log.write(msg2)
unsupp_log.write(msg2)
except:
self.ctx.error("Error writing diagnostic files - in " + self.ctx.log_directory +
" during configuration migration.")
def _filter_server_repository(self, server):
"""Filter out LOCAL server repositories and only keep TFTP, FTP and SFTP"""
if not server:
self.ctx.error("Pre-Migrate missing server repository object.")
if server.server_type != ServerType.FTP_SERVER and server.server_type != ServerType.TFTP_SERVER and \
server.server_type != ServerType.SFTP_SERVER:
self.ctx.error("Pre-Migrate does not support " + server.server_type + " server repository.")
def _save_config_to_csm_data(self, files, admin=False):
"""
Copy the admin configuration or IOS-XR configuration
from device to csm_data.
:param files: the full local file paths for configs.
:param admin: True if asking for admin config, False otherwise.
:return: None
"""
try:
cmd = "admin show run" if admin else "show run"
output = self.ctx.send(cmd, timeout=TIMEOUT_FOR_COPY_CONFIG)
ind = output.rfind('Building configuration...\n')
except pexpect.TIMEOUT:
self.ctx.error("CLI '{}' timed out after 1 hour.".format(cmd))
for file_path in files:
# file = '../../csm_data/migration/<hostname>' + filename
file_to_write = open(file_path, 'w+')
file_to_write.write(output[(ind + 1):])
file_to_write.close()
def _handle_configs(self, hostname, server, repo_url, fileloc, nox_to_use, config_filename):
"""
1. Copy admin and XR configs from device to TFTP/FTP/SFTP server repository.
2. Copy admin and XR configs from server repository to csm_data/migration/<hostname>/
3. Copy admin and XR configs from server repository to session log directory as
show-running-config.txt and admin-show-running-config.txt for comparisons
after Migrate or Post-Migrate. (Diff will be generated.)
4. Run NoX on admin config first. This run generates 1) eXR admin/calvados config
and POSSIBLY 2) eXR XR config.
5. Run NoX on XR config if no custom eXR config has been selected by user when
Pre-Migrate is scheduled. This run generates eXR XR config.
6. Merge either the selected eXR custom config or the converted XR config with the converted
eXR XR config to form a new file - cXR_xr_plane_converted_eXR.cfg
7. Copy the eXR admin/calvados config and the cXR_xr_plane_converted_eXR.cfg to the server
repository and then from there to device.
Note if user selected custom eXR XR config, that will be uploaded instead of
the NoX migrated original XR config.
:param hostname: string hostname of device, as recorded on CSM.
:param repo_url: the URL of the selected server repository. i.e., tftp://223.255.254.245/tftpboot
:param fileloc: the string path ../../csm_data/migration/<hostname>
:param nox_to_use: the name of the NoX binary executable
:param config_filename: the user selected string filename of custom eXR XR config.
If it's '', nothing was selected.
If selected, this file must be in the server repository.
:return: None if no error occurred.
"""
log_and_post_status(self.ctx, "Cleaning up previously saved configuration files for this host in csm_data")
for old_file in os.listdir(fileloc):
try:
os.remove(os.path.join(fileloc, old_file))
except:
self.ctx.warning("Failed to remove the old configuration conversion file " +
"{}".format(os.path.join(fileloc, old_file)))
pass
log_and_post_status(self.ctx, "Saving the current configurations on device into server repository and csm_data")
self._save_config_to_csm_data([os.path.join(fileloc, ADMIN_CONFIG_IN_CSM),
os.path.join(self.ctx.log_directory,
self.ctx.normalize_filename("admin show running-config"))
], admin=True)
self._save_config_to_csm_data([os.path.join(fileloc, XR_CONFIG_IN_CSM),
os.path.join(self.ctx.log_directory,
self.ctx.normalize_filename("show running-config"))
], admin=False)
log_and_post_status(self.ctx, "Converting admin configuration file with configuration migration tool")
self._run_migration_on_config(fileloc, ADMIN_CONFIG_IN_CSM, nox_to_use, hostname)
# ["admin.cal"]
config_files = [CONVERTED_ADMIN_CAL_CONFIG_IN_CSM]
# ["cXR_admin_plane_converted_eXR.cfg"]
config_names_on_device = [FINAL_CAL_CONFIG]
if not config_filename:
log_and_post_status(self.ctx, "Converting IOS-XR configuration file with configuration migration tool")
self._run_migration_on_config(fileloc, XR_CONFIG_IN_CSM, nox_to_use, hostname)
# admin.iox and xr.iox
files_to_merge = [os.path.join(fileloc, CONVERTED_ADMIN_XR_CONFIG_IN_CSM),
os.path.join(fileloc, CONVERTED_XR_CONFIG_IN_CSM)]
with open(os.path.join(fileloc, FINAL_XR_CONFIG), 'w') as merged_file:
for fname in files_to_merge:
with open(fname) as infile:
for line in infile:
merged_file.write(line)
# "cXR_xr_plane_converted_eXR.cfg" - product of files_to_merge, merging will be done
config_files.append(FINAL_XR_CONFIG)
# "cXR_xr_plane_converted_eXR.cfg"
config_names_on_device.append(FINAL_XR_CONFIG)
log_and_post_status(self.ctx, "Uploading the migrated configuration files to server repository and device.")
config_names_in_repo = [hostname + "_" + config_name for config_name in config_files]
if self._upload_files_to_server_repository([os.path.join(fileloc, config_name)
for config_name in config_files],
server, config_names_in_repo):
if config_filename:
config_names_in_repo.append(config_filename)
# "cXR_xr_plane_converted_eXR.cfg"
config_names_on_device.append(FINAL_XR_CONFIG)
self._copy_files_to_device(server, repo_url, config_names_in_repo,
[CONFIG_LOCATION + config_name
for config_name in config_names_on_device],
timeout=TIMEOUT_FOR_COPY_CONFIG)
def _get_packages(self, packages):
"""Find out which package is eXR tar file, which is crypto_auto_key_gen.txt"""
if len(packages) > 2:
self.ctx.error("More than two packages are selected, however, only the ASR9K IOS XR 64 Bit tar file and the crypto key generation file should be selected.")
if len(packages) == 0:
self.ctx.error("No ASR9K IOS XR 64 Bit tar file selected for Pre-Migrate.")
image_pattern = re.compile("asr9k.*\.tar.*")
exr_tar = None
crypto_file = None
for package in packages:
if image_pattern.match(package):
exr_tar = package
else:
crypto_file = package
return exr_tar, crypto_file
def _find_nox_to_use(self):
"""
Find out if the linux system is 32 bit or 64 bit. NoX currently only has a binary executable
compiled for 64 bit.
"""
check_32_or_64_system = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE)
out, err = check_32_or_64_system.communicate()
if err:
self.ctx.error("Failed to execute 'uname -a' on the linux system.")
if "x86_64" in out:
return NOX_64_BINARY
else:
self.ctx.error("The configuration migration tool NoX is currently not available for 32 bit linux system.")
def run(self):
server_repo_url = None
try:
server_repo_url = self.ctx.server_repository_url
except AttributeError:
pass
if server_repo_url is None:
self.ctx.error("No repository provided.")
try:
packages = self.ctx.software_packages
except AttributeError:
self.ctx.error("No package list provided")
        config_filename_tuple = self.ctx.load_job_data('config_filename')
        if config_filename_tuple:
            config_filename = config_filename_tuple[0]
        else:
            config_filename = None
server = None
try:
server = self.ctx.get_server
except AttributeError:
self.ctx.error("No server repository selected")
if server is None:
self.ctx.error("No server repository selected")
if not self.ctx.load_job_data('override_hw_req'):
self.ctx.error("Missing indication of whether to override hardware requirement or not.")
exr_image, crypto_file = self._get_packages(packages)
version_match = re.findall("(\d+\.\d+)\.\d+", exr_image)
if version_match:
exr_version = version_match[0]
else:
self.ctx.error("The selected tar file is missing release number in its filename.")
self._filter_server_repository(server)
hostname_for_filename = re.sub("[()\s]", "_", self.ctx._csm.host.hostname)
hostname_for_filename = re.sub("_+", "_", hostname_for_filename)
fileloc = self.ctx.migration_directory + hostname_for_filename
if not os.path.exists(fileloc):
os.makedirs(fileloc)
self.ctx.save_job_data('hardware_audit_version', exr_version)
hardware_audit_plugin = HardwareAuditPlugin(self.ctx)
hardware_audit_plugin.run()
fpd_relevant_nodes_tuple = self.ctx.load_job_data('fpd_relevant_nodes')
if fpd_relevant_nodes_tuple:
fpd_relevant_nodes = fpd_relevant_nodes_tuple[0]
else:
self.ctx.error("No data field fpd_relevant_nodes after completing hardware audit successfully.")
log_and_post_status(self.ctx, "Checking current software version.")
match_version = re.search("(\d\.\d\.\d).*", self.ctx.os_version)
if not match_version:
self.ctx.error("Bad os_version.")
version = match_version.group(1)
if version < MINIMUM_RELEASE_VERSION_FOR_MIGRATION:
self.ctx.error("The minimal release version required for migration is {0}. Please upgrade to at lease {0} before scheduling migration.".format(MINIMUM_RELEASE_VERSION_FOR_MIGRATION))
log_and_post_status(self.ctx, "Testing ping to selected server repository IP.")
self._ping_repository_check(server_repo_url)
log_and_post_status(self.ctx, "Checking if FPD package is active on device.")
self._check_if_fpd_package_installed()
nox_to_use = self.ctx.migration_directory + self._find_nox_to_use()
if not os.path.isfile(nox_to_use):
self.ctx.error("The configuration conversion tool {} is missing. ".format(nox_to_use) +
"CSM should have downloaded it from CCO when migration actions were scheduled.")
self._save_show_platform()
log_and_post_status(self.ctx, "Partition check and disk clean-up.")
self._resize_eusb()
self._handle_configs(hostname_for_filename, server,
server_repo_url, fileloc, nox_to_use, config_filename)
log_and_post_status(self.ctx, "Copying the ASR9K-X64 image from server repository to device.")
self._copy_files_to_device(server, server_repo_url, [exr_image],
[IMAGE_LOCATION + exr_image], timeout=TIMEOUT_FOR_COPY_IMAGE)
if crypto_file:
log_and_post_status(self.ctx, "Copying the crypto key generation file from server repository to device.")
self._copy_files_to_device(server, server_repo_url, [crypto_file],
[CONFIG_LOCATION + crypto_file], timeout=60)
self._ensure_updated_fpd(fpd_relevant_nodes)
# Refresh package and inventory information
get_package(self.ctx)
get_inventory(self.ctx)
return True
|
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A text classification cnn/rnn inferencer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from PIL import Image
from scipy import misc
import StringIO
import tensorflow as tf
import numpy as np
import os
import copy
import argparse
import facenet
import json
import align.detect_face
from uai.arch.tf_model import TFAiUcloudModel
class FaceCompareModel(TFAiUcloudModel):
""" FaceCompareModel example model
"""
def __init__(self, conf):
super(FaceCompareModel, self).__init__(conf)
def load_model(self):
sess = tf.Session()
with sess.as_default():
# Load the model
facenet.load_model(self.model_dir)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
self._images_placeholder=images_placeholder
self._embeddings=embeddings
self._phase_train_placeholder=phase_train_placeholder
self._sess = sess
def preprocess(self, data):
json_data = json.load(data)
cnt = json_data['cnt']
raw_images = json_data['images']
images = []
for raw_image in raw_images:
img_data = raw_image.decode('base64')
img = Image.open(StringIO.StringIO(img_data))
img = misc.fromimage(img)
img = misc.imresize(img, (160, 160), interp='bilinear')
prewhitened = facenet.prewhiten(img)
images.append(prewhitened)
return images
def compare(self, data):
images = self.preprocess(data)
images_placeholder = self._images_placeholder
phase_train_placeholder = self._phase_train_placeholder
sess = self._sess
embeddings = self._embeddings
feed_dict = {images_placeholder: images, phase_train_placeholder:False }
emb = sess.run(embeddings, feed_dict=feed_dict)
if len(images) < 2:
return -1
dist = np.sqrt(np.sum(np.square(np.subtract(emb[0,:], emb[1,:]))))
print(' %1.4f ' % dist, end='')
return dist
def execute(self, data, batch_size):
results = []
for i in range(batch_size):
ret = self.compare(data[i])
if ret == -1:
results.append(str(-1))
else:
results.append(ret.astype('S10'))
return results
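# Usage sketch (illustrative only, not called by the serving framework): preprocess()
# above expects a JSON body with a 'cnt' field and a list of base64-encoded images under
# 'images'. A client-side helper along these lines could build such a request body
# (Python 2 style, matching the str.decode('base64') usage above).
def _example_build_compare_request(image_bytes_a, image_bytes_b):
    """Hypothetical client-side helper: pack two raw image byte strings into the
    JSON payload format consumed by FaceCompareModel.preprocess."""
    return json.dumps({
        'cnt': 2,
        'images': [image_bytes_a.encode('base64'),
                   image_bytes_b.encode('base64')],
    })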
class FaceEmbedModel(TFAiUcloudModel):
""" FaceEmbedModel example model
"""
def __init__(self, conf):
json.encoder.FLOAT_REPR = lambda o: format(o, '.8f')
super(FaceEmbedModel, self).__init__(conf)
def load_model(self):
sess = tf.Session()
with sess.as_default():
# Load the model
facenet.load_model(self.model_dir)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
self._images_placeholder=images_placeholder
self._embeddings=embeddings
self._phase_train_placeholder=phase_train_placeholder
self._sess = sess
def preprocess(self, data):
json_data = json.load(data)
raw_images = json_data['images']
images = []
for raw_image in raw_images:
img_data = raw_image.decode('base64')
img = Image.open(StringIO.StringIO(img_data))
img = misc.fromimage(img)
img = misc.imresize(img, (160, 160), interp='bilinear')
prewhitened = facenet.prewhiten(img)
images.append(prewhitened)
return images
def cal_embed(self, data):
images = self.preprocess(data)
images_placeholder = self._images_placeholder
phase_train_placeholder = self._phase_train_placeholder
sess = self._sess
embeddings = self._embeddings
feed_dict = {images_placeholder: images, phase_train_placeholder:False }
emb = sess.run(embeddings, feed_dict=feed_dict)
return emb
def execute(self, data, batch_size):
results = []
for i in range(batch_size):
ret = self.cal_embed(data[i])
ret = ret.tolist()
ret = json.dumps(ret)
#ret = json.dumps([[ret.__dict__ for ob in lst] for lst in ret])
results.append(ret)
return results
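# Note on the output format: execute() above returns one JSON string per request,
# containing a list of embedding vectors (one per input image); json.encoder.FLOAT_REPR
# is patched in __init__ so floats are serialised with 8 decimal places.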
class FaceEmbedModelAutoBatch(TFAiUcloudModel):
""" FaceEmbedModel example model
"""
def __init__(self, conf):
json.encoder.FLOAT_REPR = lambda o: format(o, '.8f')
super(FaceEmbedModelAutoBatch, self).__init__(conf)
def load_model(self):
sess = tf.Session()
with sess.as_default():
# Load the model
facenet.load_model(self.model_dir)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
self._images_placeholder=images_placeholder
self._embeddings=embeddings
self._phase_train_placeholder=phase_train_placeholder
self._sess = sess
def cal_embed(self, images):
images_placeholder = self._images_placeholder
phase_train_placeholder = self._phase_train_placeholder
sess = self._sess
embeddings = self._embeddings
feed_dict = {images_placeholder: images, phase_train_placeholder:False }
emb = sess.run(embeddings, feed_dict=feed_dict)
return emb
def execute(self, data, batch_size):
image_list = []
results = []
for i in range(batch_size):
img = Image.open(data[i])
img = misc.fromimage(img)
img = misc.imresize(img, (160, 160), interp='bilinear')
prewhitened = facenet.prewhiten(img)
image_list.append(prewhitened)
rets = self.cal_embed(image_list)
for i in range(batch_size):
ret = rets[i].tolist()
ret = json.dumps(ret)
#ret = json.dumps([[ret.__dict__ for ob in lst] for lst in ret])
results.append(ret)
return results
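# Design note: unlike FaceEmbedModel, the AutoBatch variant above reads raw image files
# directly from data[i] (no JSON/base64 wrapping) and pushes the whole batch through the
# session in a single embeddings computation.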
class FaceDetectionModel(TFAiUcloudModel):
""" FaceCompareModel example model
"""
def __init__(self, conf):
super(FaceDetectionModel, self).__init__(conf)
def load_model(self):
sess = tf.Session()
with sess.as_default():
# Load the model
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
self._pnet = pnet
self._rnet = rnet
self._onet = onet
def execute(self, data, batch_size):
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ]  # thresholds for the three detection stages
factor = 0.709 # scale factor
pnet = self._pnet
rnet = self._rnet
onet = self._onet
ret = []
for i in range(batch_size):
img = Image.open(data[i])
img = misc.fromimage(img)
boundingboxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
boundingboxes = boundingboxes.tolist()
ret_val = json.dumps(boundingboxes)
ret.append(ret_val)
return ret
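# Usage sketch (illustrative only): each element returned by FaceDetectionModel.execute
# is a JSON string holding the bounding boxes for one image. In the facenet MTCNN
# implementation each box is typically a 5-element row [x1, y1, x2, y2, score], but the
# exact layout depends on the align.detect_face version in use.
def _example_decode_detection_result(json_str):
    """Hypothetical client-side helper: parse one detection result back into a
    plain list of bounding boxes."""
    return json.loads(json_str)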
|
|
import os
import py, pytest
from _pytest.mark import MarkGenerator as Mark
class TestMark:
def test_markinfo_repr(self):
from _pytest.mark import MarkInfo
m = MarkInfo("hello", (1,2), {})
repr(m)
def test_pytest_exists_in_namespace_all(self):
assert 'mark' in py.test.__all__
assert 'mark' in pytest.__all__
def test_pytest_mark_notcallable(self):
mark = Mark()
pytest.raises((AttributeError, TypeError), mark)
def test_pytest_mark_name_starts_with_underscore(self):
mark = Mark()
pytest.raises(AttributeError, getattr, mark, '_some_name')
def test_pytest_mark_bare(self):
mark = Mark()
def f():
pass
mark.hello(f)
assert f.hello
def test_pytest_mark_keywords(self):
mark = Mark()
def f():
pass
mark.world(x=3, y=4)(f)
assert f.world
assert f.world.kwargs['x'] == 3
assert f.world.kwargs['y'] == 4
def test_apply_multiple_and_merge(self):
mark = Mark()
def f():
pass
mark.world
mark.world(x=3)(f)
assert f.world.kwargs['x'] == 3
mark.world(y=4)(f)
assert f.world.kwargs['x'] == 3
assert f.world.kwargs['y'] == 4
mark.world(y=1)(f)
assert f.world.kwargs['y'] == 1
assert len(f.world.args) == 0
def test_pytest_mark_positional(self):
mark = Mark()
def f():
pass
mark.world("hello")(f)
assert f.world.args[0] == "hello"
mark.world("world")(f)
def test_pytest_mark_positional_func_and_keyword(self):
mark = Mark()
def f():
raise Exception
m = mark.world(f, omega="hello")
def g():
pass
assert m(g) == g
assert g.world.args[0] is f
assert g.world.kwargs["omega"] == "hello"
def test_pytest_mark_reuse(self):
mark = Mark()
def f():
pass
w = mark.some
w("hello", reason="123")(f)
assert f.some.args[0] == "hello"
assert f.some.kwargs['reason'] == "123"
def g():
pass
w("world", reason2="456")(g)
assert g.some.args[0] == "world"
assert 'reason' not in g.some.kwargs
assert g.some.kwargs['reason2'] == "456"
def test_marked_class_run_twice(testdir, request):
"""Test fails file is run twice that contains marked class.
See issue#683.
"""
py_file = testdir.makepyfile("""
import pytest
@pytest.mark.parametrize('abc', [1, 2, 3])
class Test1(object):
def test_1(self, abc):
assert abc in [1, 2, 3]
""")
file_name = os.path.basename(py_file.strpath)
rec = testdir.inline_run(file_name, file_name)
rec.assertoutcome(passed=6)
def test_ini_markers(testdir):
testdir.makeini("""
[pytest]
markers =
a1: this is a webtest marker
a2: this is a smoke marker
""")
testdir.makepyfile("""
def test_markers(pytestconfig):
markers = pytestconfig.getini("markers")
print (markers)
assert len(markers) >= 2
assert markers[0].startswith("a1:")
assert markers[1].startswith("a2:")
""")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
def test_markers_option(testdir):
testdir.makeini("""
[pytest]
markers =
a1: this is a webtest marker
a1some: another marker
""")
result = testdir.runpytest("--markers", )
result.stdout.fnmatch_lines([
"*a1*this is a webtest*",
"*a1some*another marker",
])
def test_markers_option_with_plugin_in_current_dir(testdir):
testdir.makeconftest('pytest_plugins = "flip_flop"')
testdir.makepyfile(flip_flop="""\
def pytest_configure(config):
config.addinivalue_line("markers", "flip:flop")
def pytest_generate_tests(metafunc):
try:
mark = metafunc.function.flipper
except AttributeError:
return
metafunc.parametrize("x", (10, 20))""")
testdir.makepyfile("""\
import pytest
@pytest.mark.flipper
def test_example(x):
assert x""")
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(["*flip*flop*"])
def test_mark_on_pseudo_function(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.r(lambda x: 0/0)
def test_hello():
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_strict_prohibits_unregistered_markers(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.unregisteredmark
def test_hello():
pass
""")
result = testdir.runpytest("--strict")
assert result.ret != 0
result.stdout.fnmatch_lines([
"*unregisteredmark*not*registered*",
])
@pytest.mark.parametrize("spec", [
("xyz", ("test_one",)),
("xyz and xyz2", ()),
("xyz2", ("test_two",)),
("xyz or xyz2", ("test_one", "test_two"),)
])
def test_mark_option(spec, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xyz
def test_one():
pass
@pytest.mark.xyz2
def test_two():
pass
""")
opt, passed_result = spec
rec = testdir.inline_run("-m", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize("spec", [
("interface", ("test_interface",)),
("not interface", ("test_nointer",)),
])
def test_mark_option_custom(spec, testdir):
testdir.makeconftest("""
import pytest
def pytest_collection_modifyitems(items):
for item in items:
if "interface" in item.nodeid:
item.keywords["interface"] = pytest.mark.interface
""")
testdir.makepyfile("""
def test_interface():
pass
def test_nointer():
pass
""")
opt, passed_result = spec
rec = testdir.inline_run("-m", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize("spec", [
("interface", ("test_interface",)),
("not interface", ("test_nointer", "test_pass")),
("pass", ("test_pass",)),
("not pass", ("test_interface", "test_nointer")),
])
def test_keyword_option_custom(spec, testdir):
testdir.makepyfile("""
def test_interface():
pass
def test_nointer():
pass
def test_pass():
pass
""")
opt, passed_result = spec
rec = testdir.inline_run("-k", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize("spec", [
("None", ("test_func[None]",)),
("1.3", ("test_func[1.3]",)),
("2-3", ("test_func[2-3]",))
])
def test_keyword_option_parametrize(spec, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
""")
opt, passed_result = spec
rec = testdir.inline_run("-k", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
def test_parametrized_collected_from_command_line(testdir):
"""Parametrized test not collected if test named specified
in command line issue#649.
"""
py_file = testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
""")
file_name = os.path.basename(py_file.strpath)
rec = testdir.inline_run(file_name + "::" + "test_func")
rec.assertoutcome(passed=3)
class TestFunctional:
def test_mark_per_function(self, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.hello
def test_hello():
assert hasattr(test_hello, 'hello')
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_mark_per_module(self, testdir):
item = testdir.getitem("""
import pytest
pytestmark = pytest.mark.hello
def test_func():
pass
""")
keywords = item.keywords
assert 'hello' in keywords
def test_marklist_per_class(self, testdir):
item = testdir.getitem("""
import pytest
class TestClass:
pytestmark = [pytest.mark.hello, pytest.mark.world]
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
""")
keywords = item.keywords
assert 'hello' in keywords
def test_marklist_per_module(self, testdir):
item = testdir.getitem("""
import pytest
pytestmark = [pytest.mark.hello, pytest.mark.world]
class TestClass:
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
""")
keywords = item.keywords
assert 'hello' in keywords
assert 'world' in keywords
def test_mark_per_class_decorator(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.hello
class TestClass:
def test_func(self):
assert TestClass.test_func.hello
""")
keywords = item.keywords
assert 'hello' in keywords
def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.hello
class TestClass:
pytestmark = pytest.mark.world
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
""")
keywords = item.keywords
assert 'hello' in keywords
assert 'world' in keywords
def test_merging_markers(self, testdir):
p = testdir.makepyfile("""
import pytest
pytestmark = pytest.mark.hello("pos1", x=1, y=2)
class TestClass:
# classlevel overrides module level
pytestmark = pytest.mark.hello(x=3)
@pytest.mark.hello("pos0", z=4)
def test_func(self):
pass
""")
items, rec = testdir.inline_genitems(p)
item, = items
keywords = item.keywords
marker = keywords['hello']
assert marker.args == ("pos0", "pos1")
assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4}
# test the new __iter__ interface
l = list(marker)
assert len(l) == 3
assert l[0].args == ("pos0",)
assert l[1].args == ()
assert l[2].args == ("pos1", )
@pytest.mark.xfail(reason='unfixed')
def test_merging_markers_deep(self, testdir):
# issue 199 - propagate markers into nested classes
p = testdir.makepyfile("""
import pytest
class TestA:
pytestmark = pytest.mark.a
def test_b(self):
assert True
class TestC:
# this one didn't get marked
def test_d(self):
assert True
""")
items, rec = testdir.inline_genitems(p)
for item in items:
print (item, item.keywords)
assert 'a' in item.keywords
def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.a
class Base: pass
@pytest.mark.b
class Test1(Base):
def test_foo(self): pass
class Test2(Base):
def test_bar(self): pass
""")
items, rec = testdir.inline_genitems(p)
self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',))
@pytest.mark.issue568
@pytest.mark.xfail(reason="markers smear on methods of base classes")
def test_mark_should_not_pass_to_siebling_class(self, testdir):
p = testdir.makepyfile("""
import pytest
class TestBase:
def test_foo(self):
pass
@pytest.mark.b
class TestSub(TestBase):
pass
class TestOtherSub(TestBase):
pass
""")
items, rec = testdir.inline_genitems(p)
base_item, sub_item, sub_item_other = items
assert not hasattr(base_item.obj, 'b')
assert not hasattr(sub_item_other.obj, 'b')
def test_mark_decorator_baseclasses_merged(self, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.a
class Base: pass
@pytest.mark.b
class Base2(Base): pass
@pytest.mark.c
class Test1(Base2):
def test_foo(self): pass
class Test2(Base2):
@pytest.mark.d
def test_bar(self): pass
""")
items, rec = testdir.inline_genitems(p)
self.assert_markers(items, test_foo=('a', 'b', 'c'),
test_bar=('a', 'b', 'd'))
def test_mark_with_wrong_marker(self, testdir):
reprec = testdir.inline_runsource("""
import pytest
class pytestmark:
pass
def test_func():
pass
""")
l = reprec.getfailedcollections()
assert len(l) == 1
assert "TypeError" in str(l[0].longrepr)
def test_mark_dynamically_in_funcarg(self, testdir):
testdir.makeconftest("""
import pytest
@pytest.fixture
def arg(request):
request.applymarker(pytest.mark.hello)
def pytest_terminal_summary(terminalreporter):
l = terminalreporter.stats['passed']
terminalreporter.writer.line("keyword: %s" % l[0].keywords)
""")
testdir.makepyfile("""
def test_func(arg):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"keyword: *hello*"
])
def test_merging_markers_two_functions(self, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.hello("pos1", z=4)
@pytest.mark.hello("pos0", z=3)
def test_func():
pass
""")
items, rec = testdir.inline_genitems(p)
item, = items
keywords = item.keywords
marker = keywords['hello']
l = list(marker)
assert len(l) == 2
assert l[0].args == ("pos0",)
assert l[1].args == ("pos1",)
def test_no_marker_match_on_unmarked_names(self, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.shouldmatch
def test_marked():
assert 1
def test_unmarked():
assert 1
""")
reprec = testdir.inline_run("-m", "test_unmarked", p)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) + len(skipped) + len(failed) == 0
dlist = reprec.getcalls("pytest_deselected")
deselected_tests = dlist[0].items
assert len(deselected_tests) == 2
def test_keywords_at_node_level(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(scope="session", autouse=True)
def some(request):
request.keywords["hello"] = 42
assert "world" not in request.keywords
@pytest.fixture(scope="function", autouse=True)
def funcsetup(request):
assert "world" in request.keywords
assert "hello" in request.keywords
@pytest.mark.world
def test_function():
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_keyword_added_for_session(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_collection_modifyitems(session):
session.add_marker("mark1")
session.add_marker(pytest.mark.mark2)
session.add_marker(pytest.mark.mark3)
pytest.raises(ValueError, lambda:
session.add_marker(10))
""")
testdir.makepyfile("""
def test_some(request):
assert "mark1" in request.keywords
assert "mark2" in request.keywords
assert "mark3" in request.keywords
assert 10 not in request.keywords
marker = request.node.get_marker("mark1")
assert marker.name == "mark1"
assert marker.args == ()
assert marker.kwargs == {}
""")
reprec = testdir.inline_run("-m", "mark1")
reprec.assertoutcome(passed=1)
def assert_markers(self, items, **expected):
"""assert that given items have expected marker names applied to them.
expected should be a dict of (item name -> seq of expected marker names)
.. note:: this could be moved to ``testdir`` if proven to be useful
to other modules.
"""
from _pytest.mark import MarkInfo
items = dict((x.name, x) for x in items)
for name, expected_markers in expected.items():
markers = items[name].keywords._markers
marker_names = set([name for (name, v) in markers.items()
if isinstance(v, MarkInfo)])
assert marker_names == set(expected_markers)
@pytest.mark.xfail(reason='callspec2.setmulti misuses keywords')
@pytest.mark.issue1540
def test_mark_from_parameters(self, testdir):
testdir.makepyfile("""
import pytest
pytestmark = pytest.mark.skipif(True, reason='skip all')
# skipifs inside fixture params
params = [pytest.mark.skipif(False, reason='dont skip')('parameter')]
@pytest.fixture(params=params)
def parameter(request):
return request.param
def test_1(parameter):
assert True
""")
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
class TestKeywordSelection:
def test_select_simple(self, testdir):
file_test = testdir.makepyfile("""
def test_one():
assert 0
class TestClass(object):
def test_method_one(self):
assert 42 == 43
""")
def check(keyword, name):
reprec = testdir.inline_run("-s", "-k", keyword, file_test)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
assert failed[0].nodeid.split("::")[-1] == name
assert len(reprec.getcalls('pytest_deselected')) == 1
for keyword in ['test_one', 'est_on']:
check(keyword, 'test_one')
check('TestClass and test', 'test_method_one')
@pytest.mark.parametrize("keyword", [
'xxx', 'xxx and test_2', 'TestClass', 'xxx and not test_1',
'TestClass and test_2', 'xxx and TestClass and test_2'])
def test_select_extra_keywords(self, testdir, keyword):
p = testdir.makepyfile(test_select="""
def test_1():
pass
class TestClass:
def test_2(self):
pass
""")
testdir.makepyfile(conftest="""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(name):
outcome = yield
if name == "TestClass":
item = outcome.get_result()
item.extra_keyword_matches.add("xxx")
""")
reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)
py.builtin.print_("keyword", repr(keyword))
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 1
assert passed[0].nodeid.endswith("test_2")
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
assert dlist[0].items[0].name == 'test_1'
def test_select_starton(self, testdir):
threepass = testdir.makepyfile(test_threepass="""
def test_one(): assert 1
def test_two(): assert 1
def test_three(): assert 1
""")
reprec = testdir.inline_run("-k", "test_two:", threepass)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 2
assert not failed
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
item = dlist[0].items[0]
assert item.name == "test_one"
def test_keyword_extra(self, testdir):
p = testdir.makepyfile("""
def test_one():
assert 0
test_one.mykeyword = True
""")
reprec = testdir.inline_run("-k", "mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
@pytest.mark.xfail
def test_keyword_extra_dash(self, testdir):
p = testdir.makepyfile("""
def test_one():
assert 0
test_one.mykeyword = True
""")
# with argparse the argument to an option cannot
# start with '-'
reprec = testdir.inline_run("-k", "-mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert passed + skipped + failed == 0
def test_no_magic_values(self, testdir):
"""Make sure the tests do not match on magic values,
no double underscored values, like '__dict__',
and no instance values, like '()'.
"""
p = testdir.makepyfile("""
def test_one(): assert 1
""")
def assert_test_is_not_selected(keyword):
reprec = testdir.inline_run("-k", keyword, p)
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed + skipped + failed == 0
deselected_tests = dlist[0].items
assert len(deselected_tests) == 1
assert_test_is_not_selected("__")
assert_test_is_not_selected("()")
|
|
# -*- coding: utf-8 -*-
"""
Django settings for To-Do-List project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
import os
ROOT_DIR = environ.Path(__file__) - 3 # (to_do_list/config/settings/common.py - 3 = to_do_list/)
APPS_DIR = ROOT_DIR.path('to_do_list')
env = environ.Env()
env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'to_do_list.users.apps.UsersConfig',
# Your stuff: custom apps go here
'django_tables2',
'django_filters',
'captcha',
'to_do_list.tasks',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'to_do_list.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Arnaud Blois""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# -----------------------------------------------------------------------------
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Setting up a TRAVIS Database settings
# more info at https://gist.github.com/ndarville/3625246 and
# http://www.lesinskis.com/travis_ci_django.html
if 'BUILD_ON_TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///origin'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
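# Example DATABASE_URL values understood by django-environ's env.db() include, e.g.,
# postgres://user:password@localhost:5432/origin or sqlite:////full/path/to/db.sqlite3
# (shown for illustration; the default above falls back to a local 'origin' database).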
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'to_do_list.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'to_do_list.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'tasks:home'
LOGIN_URL = 'account_login'
ACCOUNT_SIGNUP_FORM_CLASS = 'to_do_list.users.forms.AllauthSignupForm'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# The default keys given here instruct Google to use the test reCAPTCHA, which always validates
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY', default='6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY', default='6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe')
NOCAPTCHA = True
|
|
#!/usr/bin/env python
from nose.tools import assert_equal
import networkx as nx
from networkx.algorithms import bipartite
from networkx.testing import assert_edges_equal, assert_nodes_equal
class TestBipartiteProject:
def test_path_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.projected_graph(G, [1, 3])
assert_nodes_equal(list(P), [1, 3])
assert_edges_equal(list(P.edges()), [(1, 3)])
P=bipartite.projected_graph(G, [0, 2])
assert_nodes_equal(list(P), [0, 2])
assert_edges_equal(list(P.edges()), [(0, 2)])
def test_path_projected_properties_graph(self):
G=nx.path_graph(4)
G.add_node(1,name='one')
G.add_node(2,name='two')
P=bipartite.projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
assert_equal(P.node[1]['name'],G.node[1]['name'])
P=bipartite.projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
assert_equal(P.node[2]['name'],G.node[2]['name'])
def test_path_collaboration_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_directed_path_collaboration_projected_graph(self):
G=nx.DiGraph()
nx.add_path(G, range(4))
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_directed_graph(self):
G=nx.DiGraph()
nx.add_path(G, range(4))
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_star_projected_graph(self):
G=nx.star_graph(3)
P=bipartite.projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.weighted_projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.projected_graph(G,[0])
assert_nodes_equal(list(P),[0])
assert_edges_equal(list(P.edges()),[])
def test_project_multigraph(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('a',2)
G.add_edge('b',2)
P=bipartite.projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.weighted_projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.projected_graph(G,'ab',multigraph=True)
assert_edges_equal(list(P.edges()),[('a','b'),('a','b')])
def test_project_collaboration(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('b',2)
G.add_edge('c',2)
G.add_edge('c',3)
G.add_edge('c',4)
G.add_edge('b',4)
P=bipartite.collaboration_weighted_projected_graph(G,'abc')
assert_equal(P['a']['b']['weight'],1)
assert_equal(P['b']['c']['weight'],2)
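# Worked example for the asserts above (Newman collaboration weighting): the weight of
# an edge (u, v) is the sum over shared bottom nodes k of 1 / (deg(k) - 1).
# 'b' and 'c' share nodes 2 and 4, each of degree 2, giving 1 + 1 = 2;
# 'a' and 'b' share only node 1 (degree 2), giving 1.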
def test_directed_projection(self):
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge('B',2)
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],1)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B')])
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge(2,'B')
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],2)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B'),('A','B')])
class TestBipartiteWeightedProjection:
def setUp(self):
# Tore Opsahl's example
# http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
self.G=nx.Graph()
self.G.add_edge('A',1)
self.G.add_edge('A',2)
self.G.add_edge('B',1)
self.G.add_edge('B',2)
self.G.add_edge('B',3)
self.G.add_edge('B',4)
self.G.add_edge('B',5)
self.G.add_edge('C',1)
self.G.add_edge('D',3)
self.G.add_edge('E',4)
self.G.add_edge('E',5)
self.G.add_edge('E',6)
self.G.add_edge('F',6)
# Graph based on figure 6 from Newman (2001)
self.N=nx.Graph()
self.N.add_edge('A',1)
self.N.add_edge('A',2)
self.N.add_edge('A',3)
self.N.add_edge('B',1)
self.N.add_edge('B',2)
self.N.add_edge('B',3)
self.N.add_edge('C',1)
self.N.add_edge('D',1)
self.N.add_edge('E',3)
def test_project_weighted_shared(self):
edges=[('A','B',2),
('A','C',1),
('B','C',1),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3),
('A','E',1),
('A','C',1),
('A','D',1),
('B','E',1),
('B','C',1),
('B','D',1),
('C','D',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_newman(self):
edges=[('A','B',1.5),
('A','C',0.5),
('B','C',0.5),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',11/6.0),
('A','E',1/2.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/2.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_ratio(self):
edges=[('A','B',2/6.0),
('A','C',1/6.0),
('B','C',1/6.0),
('B','D',1/6.0),
('B','E',2/6.0),
('E','F',1/6.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G, 'ABCDEF', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N, 'ABCDE', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_overlap(self):
edges=[('A','B',2/2.0),
('A','C',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('B','E',2/3.0),
('E','F',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/1.0),
('A','C',1/1.0),
('A','D',1/1.0),
('B','E',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_jaccard(self):
edges=[('A','B',2/5.0),
('A','C',1/2.0),
('B','C',1/5.0),
('B','D',1/5.0),
('B','E',2/6.0),
('E','F',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_generic_weighted_projected_graph_simple(self):
def shared(G, u, v):
return len(set(G[u]) & set(G[v]))
B = nx.path_graph(5)
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
B = nx.DiGraph()
nx.add_path(B, range(5))
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(G.edges(data=True)),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
def test_generic_weighted_projected_graph_custom(self):
def jaccard(G, u, v):
unbrs = set(G[u])
vnbrs = set(G[v])
return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
def my_weight(G, u, v, weight='weight'):
w = 0
for nbr in set(G[u]) & set(G[v]):
w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
return w
B = nx.bipartite.complete_bipartite_graph(2, 2)
for i,(u,v) in enumerate(B.edges()):
B.edge[u][v]['weight'] = i + 1
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=jaccard)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 1.0})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=my_weight)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 10})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1])
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 2})])
|
|
#!/usr/bin/env python
from light_classification.tl_classifier import TLClassifier
from styx_msgs.msg import TrafficLightArray, TrafficLight
from geometry_msgs.msg import PoseStamped, Pose
from sensor_msgs.msg import Image
from scipy.spatial import KDTree
from std_msgs.msg import Int32
from styx_msgs.msg import Lane
from cv_bridge import CvBridge
import numpy as np
import rospy
import yaml
import tf
import cv2
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.camera_image = None
self.has_image = None
self.lights = []
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
# print("*** RED LIGHT! *** ")
# print("State Count: " + str(self.state_count))
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
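# Debounce behaviour of image_cb: a newly observed state resets state_count to 0; only
# once state_count has reached STATE_COUNT_THRESHOLD is the new state latched into
# last_state/last_wp, and until then the previously latched waypoint keeps being published.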
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
x: longitudinal position to match a waypoint to
y: lateral position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
closest_idx = -1
if self.waypoint_tree is not None:
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# check if closest point is ahead or behind of ego
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# hyperplane eqn thru closest coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
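# Geometry of the check above: cl_vect - prev_vect points along the track at the closest
# waypoint, and pos_vect - cl_vect points from that waypoint to the car. A positive dot
# product means the car is already past the closest waypoint, so the next index
# (modulo the track length) is returned instead.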
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if self.light_classifier is None:
return TrafficLight.RED
if not self.has_image:
return TrafficLight.UNKNOWN
if hasattr(self.camera_image, 'encoding'):
if self.camera_image.encoding == '8UC3':
self.camera_image.encoding = "rgb8"
else:
self.camera_image.encoding = 'rgb8'
img = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
light_state = self.light_classifier.get_class(img)
#print("predicted state: ", light_state)
#print("ground thruth: ", light.state)
return light_state #light.state
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if self.pose and self.waypoints:
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# get stop line wp idx
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# find closest stop line waypoint idx
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
|
# -*- coding: utf-8 -*-
"""
This module defines an image-level classification application
that maps from images to scalar, multi-class labels.
This class is instantiated and initialized by the application_driver.
"""
import os
import tensorflow as tf
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import \
ApplicationNetFactory, InitializerFactory, OptimiserFactory
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.sampler_resize_v2 import ResizeSampler
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.binary_masking import BinaryMaskingLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.loss_classification import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.evaluation.classification_evaluator import ClassificationEvaluator
SUPPORTED_INPUT = set(['image', 'label', 'sampler', 'inferred'])
class ClassificationApplication(BaseApplication):
"""This class defines an application for image-level classification
problems mapping from images to scalar labels.
This is the application class to be instantiated by the driver
and referred to in configuration files.
Although structurally similar to segmentation, this application
supports different samplers/aggregators (because patch-based
processing is not appropriate) and different monitoring metrics."""
REQUIRED_CONFIG_SECTION = "CLASSIFICATION"
def __init__(self, net_param, action_param, action):
super(ClassificationApplication, self).__init__()
tf.logging.info('starting classification application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.data_param = None
self.classification_param = None
self.SUPPORTED_SAMPLING = {
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler),
}
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.classification_param = task_param
if self.is_training:
reader_names = ('image', 'label', 'sampler')
elif self.is_inference:
reader_names = ('image',)
elif self.is_evaluation:
reader_names = ('image', 'label', 'inferred')
else:
tf.logging.fatal(
'Action `%s` not supported. Expected one of %s',
self.action, self.SUPPORTED_PHASES)
raise ValueError
try:
reader_phase = self.action_param.dataset_to_infer
except AttributeError:
reader_phase = None
file_lists = data_partitioner.get_file_lists_by(
phase=reader_phase, action=self.action)
self.readers = [
ImageReader(reader_names).initialise(
data_param, task_param, file_list) for file_list in file_lists]
foreground_masking_layer = BinaryMaskingLayer(
type_str=self.net_param.foreground_type,
multimod_fusion=self.net_param.multimod_foreground_type,
threshold=0.0) \
if self.net_param.normalise_foreground_only else None
mean_var_normaliser = MeanVarNormalisationLayer(
image_name='image', binary_masking_func=foreground_masking_layer) \
if self.net_param.whitening else None
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
binary_masking_func=foreground_masking_layer,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer') \
if (self.net_param.histogram_ref_file and
self.net_param.normalisation) else None
label_normaliser = DiscreteLabelNormalisationLayer(
image_name='label',
modalities=vars(task_param).get('label'),
model_filename=self.net_param.histogram_ref_file) \
if (self.net_param.histogram_ref_file and
task_param.label_normalisation) else None
normalisation_layers = []
if histogram_normaliser is not None:
normalisation_layers.append(histogram_normaliser)
if mean_var_normaliser is not None:
normalisation_layers.append(mean_var_normaliser)
if label_normaliser is not None:
normalisation_layers.append(label_normaliser)
augmentation_layers = []
if self.is_training:
train_param = self.action_param
if train_param.random_flipping_axes != -1:
augmentation_layers.append(RandomFlipLayer(
flip_axes=train_param.random_flipping_axes))
if train_param.scaling_percentage:
augmentation_layers.append(RandomSpatialScalingLayer(
min_percentage=train_param.scaling_percentage[0],
max_percentage=train_param.scaling_percentage[1],
antialiasing=train_param.antialiasing,
isotropic=train_param.isotropic_scaling))
if train_param.rotation_angle or \
self.action_param.rotation_angle_x or \
self.action_param.rotation_angle_y or \
self.action_param.rotation_angle_z:
rotation_layer = RandomRotationLayer()
if train_param.rotation_angle:
rotation_layer.init_uniform_angle(
train_param.rotation_angle)
else:
rotation_layer.init_non_uniform_angle(
self.action_param.rotation_angle_x,
self.action_param.rotation_angle_y,
self.action_param.rotation_angle_z)
augmentation_layers.append(rotation_layer)
# only add augmentation to first reader (not validation reader)
self.readers[0].add_preprocessing_layers(
normalisation_layers + augmentation_layers)
for reader in self.readers[1:]:
reader.add_preprocessing_layers(normalisation_layers)
# Checking num_classes is set correctly
if self.classification_param.num_classes <= 1:
raise ValueError("Number of classes must be at least 2 for classification")
for preprocessor in self.readers[0].preprocessors:
if preprocessor.name == 'label_norm':
if len(preprocessor.label_map[preprocessor.key[0]]) != self.classification_param.num_classes:
raise ValueError("Number of unique labels must be equal to "
"number of classes (check histogram_ref file)")
def initialise_resize_sampler(self):
self.sampler = [[ResizeSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
shuffle=self.is_training,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_aggregator(self):
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
postfix=self.action_param.output_postfix)
def initialise_sampler(self):
if self.is_training:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()
else:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()
def initialise_network(self):
w_regularizer = None
b_regularizer = None
reg_type = self.net_param.reg_type.lower()
decay = self.net_param.decay
if reg_type == 'l2' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l2_regularizer(decay)
b_regularizer = regularizers.l2_regularizer(decay)
elif reg_type == 'l1' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l1_regularizer(decay)
b_regularizer = regularizers.l1_regularizer(decay)
self.net = ApplicationNetFactory.create(self.net_param.name)(
num_classes=self.classification_param.num_classes,
w_initializer=InitializerFactory.get_initializer(
name=self.net_param.weight_initializer),
b_initializer=InitializerFactory.get_initializer(
name=self.net_param.bias_initializer),
w_regularizer=w_regularizer,
b_regularizer=b_regularizer,
acti_func=self.net_param.activation_function)
def add_confusion_matrix_summaries_(self,
outputs_collector,
net_out,
data_dict):
""" This method defines several monitoring metrics that
are derived from the confusion matrix """
labels = tf.reshape(tf.cast(data_dict['label'], tf.int64), [-1])
prediction = tf.reshape(tf.argmax(net_out, -1), [-1])
num_classes = self.classification_param.num_classes
conf_mat = tf.contrib.metrics.confusion_matrix(labels, prediction, num_classes)
conf_mat = tf.to_float(conf_mat)
if self.classification_param.num_classes == 2:
outputs_collector.add_to_collection(
var=conf_mat[1][1], name='true_positives',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=conf_mat[1][0], name='false_negatives',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=conf_mat[0][1], name='false_positives',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=conf_mat[0][0], name='true_negatives',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
else:
outputs_collector.add_to_collection(
var=conf_mat[tf.newaxis, :, :, tf.newaxis],
name='confusion_matrix',
average_over_devices=True, summary_type='image',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=tf.trace(conf_mat), name='accuracy',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
def switch_sampler(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
if self.is_training:
self.patience = self.action_param.patience
self.mode = self.action_param.early_stopping_mode
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(for_training=True),
lambda: switch_sampler(for_training=False))
else:
data_dict = switch_sampler(for_training=True)
image = tf.cast(data_dict['image'], tf.float32)
net_args = {'is_training': self.is_training,
'keep_prob': self.net_param.keep_prob}
net_out = self.net(image, **net_args)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
loss_func = LossFunction(
n_class=self.classification_param.num_classes,
loss_type=self.action_param.loss_type)
data_loss = loss_func(
prediction=net_out,
ground_truth=data_dict.get('label', None))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
else:
loss = data_loss
self.total_loss = loss
grads = self.optimiser.compute_gradients(
loss, colocate_gradients_with_ops=True)
outputs_collector.add_to_collection(
var=self.total_loss, name='total_loss',
average_over_devices=True, collection=CONSOLE)
outputs_collector.add_to_collection(
var=self.total_loss, name='total_loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='data_loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='data_loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
self.add_confusion_matrix_summaries_(outputs_collector,
net_out,
data_dict)
else:
# converting logits into final output for
# classification probabilities or argmax classification labels
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
net_args = {'is_training': self.is_training,
'keep_prob': self.net_param.keep_prob}
net_out = self.net(image, **net_args)
tf.logging.info(
'net_out.shape may need to be resized: %s', net_out.shape)
output_prob = self.classification_param.output_prob
num_classes = self.classification_param.num_classes
if output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'SOFTMAX', num_classes=num_classes)
elif not output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'ARGMAX', num_classes=num_classes)
else:
post_process_layer = PostProcessingLayer(
'IDENTITY', num_classes=num_classes)
net_out = post_process_layer(net_out)
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if not self.is_training:
return self.output_decoder.decode_batch(
{'csv': batch_output['window']},
batch_output['location'])
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = ClassificationEvaluator(self.readers[0],
self.classification_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'label')
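# --- Hedged illustration (not part of the original application) ----------------------
# A minimal numpy sketch of the metrics derived in add_confusion_matrix_summaries_ above,
# using the same indexing convention: rows are ground-truth labels, columns are argmax
# predictions, so conf_mat[1][1] counts true positives and the trace counts correct
# predictions (the summary named 'accuracy' above is that raw trace; here it is divided
# by the total to give a rate).
def _confusion_matrix_metrics_sketch(labels, predictions, num_classes=2):
    import numpy as np
    conf_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
    for truth, pred in zip(labels, predictions):
        conf_mat[truth, pred] += 1
    metrics = {'accuracy': np.trace(conf_mat) / conf_mat.sum()}
    if num_classes == 2:
        metrics.update({'true_positives': conf_mat[1, 1],
                        'false_negatives': conf_mat[1, 0],
                        'false_positives': conf_mat[0, 1],
                        'true_negatives': conf_mat[0, 0]})
    return metrics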
|
|
import re
from utils import Bunch
#TODO: consider adding "* * last 11 4 *" style strings (last thursday of november)
class InvalidFieldError(Exception):
pass
class InvalidCronStringError(Exception):
pass
class BasicCronRule(object):
start_year = 2000
stop_year = 2025
holiday_re = "[\*]\s[\*]\s(\d{1,2})\s(\d{1,2})\s[\*]\s(\d{4})"
def __init__(self, cron_string, start_year=None, stop_year=None):
"""
cron_string should look like: "* */6 * * 6-7 2015"
        start_year and stop_year are integers that determine the inclusive range of years used for wildcard year fields;
        they default to the class attributes start_year and stop_year.
"""
self.rulesets = self.parse(cron_string, start_year, stop_year)
@classmethod
def parse_field(cls, f, minimum=0, maximum=0):
"""
        Returns the set of integer values described by the field f.
        minimum and maximum define the inclusive range of values used for wildcards;
        they are passed as inclusive integers, and any +1 needed for Python ranges is added here.
        e.g. parse_field("0-1", 0, 2) -> set([0,1])
        e.g. parse_field("*", 0, 1) -> set([0,1])
        Handles fields that look like: "12", "1-10", "1-10/2", "*", "*/10", "1-10/3,12,14-16"
"""
        #\d{1,4} so that literal 4-digit year fields (e.g. "2015", as used in the
        #constructor docstring and holiday_re) parse instead of raising InvalidFieldError
        regexes = [
            ("^(\d{1,4})$", lambda d: {int(d)}),
            ("^(\d{1,4})-(\d{1,4})$", lambda t: set(xrange(int(t[0]), int(t[1])+1))),
            ("^(\d{1,4})-(\d{1,4})\/(\d{1,2})$", lambda t: set(xrange(int(t[0]), int(t[1])+1, int(t[2])))),
            ("^\*$", lambda wc: set(xrange(minimum, maximum+1))),
            ("^\*\/(\d{1,2})$", lambda d: set(xrange(minimum, maximum+1, int(d)))),
            ("^([\d\-\/]+),([\d\-\/,]+)$", lambda t: cls.parse_field(t[0]).union(cls.parse_field(t[1])))
        ]
for regex, fn in regexes:
matches = re.findall(regex, f)
if len(matches) > 0:
v = fn(matches[0])
return v
#If none of the regexes match, this field is not valid
raise InvalidFieldError(f)
@classmethod
def parse(cls, cron_string, start_year=None, stop_year=None):
"""
Parses a cron_string that looks like "m h dom mo dow year"
        Returns a dictionary of sets holding the integers allowed for each field.
"""
start_year = start_year or cls.start_year
stop_year = stop_year or cls.stop_year
try:
fields = cron_string.split(" ")
return {
"minutes": cls.parse_field(fields[0], 0, 59),
"hours": cls.parse_field(fields[1], 0, 23),
"dom": cls.parse_field(fields[2], 1, 31),
"month": cls.parse_field(fields[3], 1, 12),
"dow": cls.parse_field(fields[4], 1, 7),
"year": cls.parse_field(fields[5], start_year, stop_year) #What is a sensible year here?
}
except InvalidFieldError as e:
raise InvalidCronStringError("{}: ({})".format(cron_string, e.args[0]))
@staticmethod
def is_holiday(rule):
"""
Holiday is defined as one day, one month, one year:
e.g. Easter: "* * 5 4 * 2015"
"""
return re.compile(BasicCronRule.holiday_re).match(rule.strip()) is not None
@staticmethod
def holiday_tuple(hrule):
"""
assumes hrule is a holiday
returns tuple: (dd, mm, yyyy)
"""
return tuple([ int(d) for d in re.findall(BasicCronRule.holiday_re, hrule.strip())[0] ])
@classmethod
def is_valid(cls, cron_string):
"""
Note that this is just a wrapper around parse(), so usually it's faster to just attempt parse,
and catch the error
"""
try:
cls.parse(cron_string)
return True
except InvalidCronStringError:
return False
def contains(self, time_obj):
"""
Returns True/False if time_obj is contained in ruleset
"""
#If all checks pass, the time_obj belongs to this ruleset
if time_obj.year not in self.rulesets["year"]:
return False
if time_obj.month not in self.rulesets["month"]:
return False
if time_obj.day not in self.rulesets["dom"]:
return False
if time_obj.isoweekday() not in self.rulesets["dow"]:
return False
if time_obj.hour not in self.rulesets["hours"]:
return False
if time_obj.minute not in self.rulesets["minutes"]:
return False
return True
def __contains__(self, time_obj):
return self.contains(time_obj)
class CronRangeRule(BasicCronRule):
hhmm_re = "(\d{1,2}):(\d{1,2})"
@classmethod
def parse_field(cls, f, minimum=0, maximum=0):
#Try to find HH:MM fields
try:
hour, minute = map(int, re.findall(CronRangeRule.hhmm_re, f.strip())[0])
return Bunch(hour=hour, minute=minute)
        except (IndexError, ValueError):
            #Otherwise assume a normal cron field
return super(CronRangeRule, cls).parse_field(f, minimum, maximum)
@classmethod
def parse(cls, cron_string, start_year=None, stop_year=None):
try:
if not cls.looks_like_range_rule(cron_string):
raise InvalidCronStringError(cron_string)
start_year = start_year or cls.start_year
stop_year = stop_year or cls.stop_year
fields = cron_string.split(" ")
return {
"start": cls.parse_field(fields[0]),
"stop": cls.parse_field(fields[1]),
"dom": cls.parse_field(fields[2], 1, 31),
"month": cls.parse_field(fields[3], 1, 12),
"dow": cls.parse_field(fields[4], 1, 7),
"year": cls.parse_field(fields[5], start_year, stop_year) #What is a sensible year here?
}
except InvalidFieldError as e:
raise InvalidCronStringError("{}: ({})".format(cron_string, e.args[0]))
def contains(self, time_obj):
"""
Returns True/False if time_obj is contained in ruleset
"""
#If all checks pass, the time_obj belongs to this ruleset
if time_obj.year not in self.rulesets["year"]:
return False
if time_obj.month not in self.rulesets["month"]:
return False
if time_obj.day not in self.rulesets["dom"]:
return False
if time_obj.isoweekday() not in self.rulesets["dow"]:
return False
#Determine if time_obj is within the time range
        if time_obj.hour < self.rulesets["start"].hour or (time_obj.hour == self.rulesets["start"].hour and\
                                                           time_obj.minute < self.rulesets["start"].minute):
return False
if time_obj.hour > self.rulesets["stop"].hour or (time_obj.hour == self.rulesets["stop"].hour and\
time_obj.minute > self.rulesets["stop"].minute):
return False
return True
@staticmethod
def looks_like_range_rule(cron_string):
"""
        Checks whether a cron string looks like a range rule such as "12:34 13:31 ...".
        It doesn't go through the logic of checking each field; parsing is equivalent to validating.
"""
fields = cron_string.split(" ")
hhmm_re = re.compile(CronRangeRule.hhmm_re)
return (hhmm_re.match(fields[0]) is not None) and (hhmm_re.match(fields[1]) is not None)
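# --- Hedged usage sketch (illustration only, not part of the original module) --------
# Exercises the field grammar documented in BasicCronRule.parse_field, the holiday helpers,
# and the HH:MM range form handled by CronRangeRule. The rule strings and dates below are
# made up for demonstration; run this file directly to execute the checks.
if __name__ == "__main__":
    import datetime
    assert BasicCronRule.parse_field("1-10/3,12,14-16") == {1, 4, 7, 10, 12, 14, 15, 16}
    assert BasicCronRule.is_holiday("* * 5 4 * 2015")
    assert BasicCronRule.holiday_tuple("* * 5 4 * 2015") == (5, 4, 2015)
    weekday_quarter_hours = BasicCronRule("*/15 9-17 * * 1-5 *")
    assert datetime.datetime(2021, 3, 10, 9, 30) in weekday_quarter_hours      # Wednesday
    assert datetime.datetime(2021, 3, 13, 9, 30) not in weekday_quarter_hours  # Saturday
    office_hours = CronRangeRule("09:30 17:45 * * 1-5 *")
    assert datetime.datetime(2021, 3, 10, 12, 0) in office_hours
    assert datetime.datetime(2021, 3, 10, 18, 0) not in office_hours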
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import signal
from collections import namedtuple
from difflib import unified_diff
from threading import Lock as threading_lock
from threading import Event
from thrift.protocol import TJSONProtocol
from thrift.TSerialization import serialize
from twitter.common import log
from twitter.common.quantity import Amount, Time
from apache.aurora.client.base import combine_messages, format_response
from .error_handling_thread import ExecutionError, spawn_worker
from .instance_watcher import InstanceWatcher
from .job_monitor import JobMonitor
from .quota_check import CapacityRequest, QuotaCheck
from .scheduler_client import SchedulerProxy
from .scheduler_mux import SchedulerMux
from .updater_util import FailureThreshold, UpdaterConfig
from gen.apache.aurora.api.constants import ACTIVE_STATES
from gen.apache.aurora.api.ttypes import (
AddInstancesConfig,
JobKey,
Lock,
LockKey,
LockValidation,
Response,
ResponseCode,
ResponseDetail,
TaskQuery
)
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
class Updater(object):
"""Performs an update command using a collection of parallel threads.
The number of parallel threads used is determined by the UpdateConfig.batch_size."""
class Error(Exception):
"""Updater error wrapper."""
pass
RPC_COMPLETION_TIMEOUT_SECS = Amount(120, Time.SECONDS)
OPERATION_CONFIGS = namedtuple('OperationConfigs', ['from_config', 'to_config'])
INSTANCE_CONFIGS = namedtuple(
'InstanceConfigs',
['remote_config_map', 'local_config_map', 'instances_to_process']
)
INSTANCE_DATA = namedtuple('InstanceData', ['instance_id', 'operation_configs'])
def __init__(self,
config,
health_check_interval_seconds,
scheduler=None,
instance_watcher=None,
quota_check=None,
job_monitor=None,
scheduler_mux=None,
rpc_completion_timeout=RPC_COMPLETION_TIMEOUT_SECS):
self._config = config
self._job_key = JobKey(role=config.role(), environment=config.environment(), name=config.name())
self._health_check_interval_seconds = health_check_interval_seconds
self._scheduler = scheduler or SchedulerProxy(config.cluster())
self._quota_check = quota_check or QuotaCheck(self._scheduler)
self._scheduler_mux = scheduler_mux or SchedulerMux()
self._job_monitor = job_monitor or JobMonitor(
self._scheduler,
self._config.job_key(),
scheduler_mux=self._scheduler_mux)
self._rpc_completion_timeout = rpc_completion_timeout
try:
self._update_config = UpdaterConfig(**config.update_config().get())
except ValueError as e:
raise self.Error(str(e))
if self._update_config.pulse_interval_secs:
raise self.Error('Pulse interval seconds is not supported by the client updater.')
self._lock = None
self._thread_lock = threading_lock()
self._batch_wait_event = Event()
self._batch_completion_queue = Queue()
self.failure_threshold = FailureThreshold(
self._update_config.max_per_instance_failures,
self._update_config.max_total_failures
)
self._watcher = instance_watcher or InstanceWatcher(
self._scheduler,
self._job_key,
self._update_config.restart_threshold,
self._update_config.watch_secs,
self._health_check_interval_seconds,
scheduler_mux=self._scheduler_mux)
self._terminating = False
def _start(self):
"""Starts an update by applying an exclusive lock on a job being updated.
Returns Response instance from the scheduler call.
"""
resp = self._scheduler.acquireLock(LockKey(job=self._job_key))
if resp.responseCode == ResponseCode.OK:
self._lock = resp.result.acquireLockResult.lock
return resp
def _finish(self):
"""Finishes an update by removing an exclusive lock on an updated job.
Returns Response instance from the scheduler call.
"""
resp = self._scheduler.releaseLock(self._lock, LockValidation.CHECKED)
if resp.responseCode == ResponseCode.OK:
self._lock = None
else:
log.error('There was an error finalizing the update: %s' % combine_messages(resp))
return resp
def int_handler(self, *args):
"""Ensures keyboard interrupt exception is raised on a main thread."""
raise KeyboardInterrupt()
def _update(self, instance_configs):
"""Drives execution of the update logic.
Performs instance updates in parallel using a number of threads bound by
the batch_size config option.
Arguments:
instance_configs -- list of instance update configurations to go through.
Returns the set of instances that failed to update.
"""
# Register signal handler to ensure KeyboardInterrupt is received by a main thread.
signal.signal(signal.SIGINT, self.int_handler)
instances_to_update = [
self.INSTANCE_DATA(
instance_id,
self.OPERATION_CONFIGS(
from_config=instance_configs.remote_config_map,
to_config=instance_configs.local_config_map))
for instance_id in instance_configs.instances_to_process
]
log.info('Instances to update: %s' % instance_configs.instances_to_process)
update_queue = self._update_instances_in_parallel(self._update_instance, instances_to_update)
if self._is_failed_update(quiet=False):
if not self._update_config.rollback_on_failure:
log.info('Rollback on failure is disabled in config. Aborting rollback')
return
rollback_ids = self._get_rollback_ids(instance_configs.instances_to_process, update_queue)
instances_to_revert = [
self.INSTANCE_DATA(
instance_id,
self.OPERATION_CONFIGS(
from_config=instance_configs.local_config_map,
to_config=instance_configs.remote_config_map))
for instance_id in rollback_ids
]
log.info('Reverting update for: %s' % rollback_ids)
self._update_instances_in_parallel(self._revert_instance, instances_to_revert)
return not self._is_failed_update()
def _update_instances_in_parallel(self, target, instances_to_update):
"""Processes instance updates in parallel and waits for completion.
Arguments:
target -- target method to handle instance update.
instances_to_update -- list of InstanceData with update details.
Returns Queue with non-updated instance data.
"""
log.info('Processing in parallel with %s worker thread(s)' % self._update_config.batch_size)
instance_queue = Queue()
for instance_to_update in instances_to_update:
instance_queue.put(instance_to_update)
try:
threads = []
for _ in range(self._update_config.batch_size):
threads.append(spawn_worker(target, kwargs={'instance_queue': instance_queue}))
for thread in threads:
thread.join_and_raise()
except Exception as e:
log.debug('Caught unhandled exception: %s' % e)
self._terminate()
raise
return instance_queue
def _try_reset_batch_wait_event(self, instance_id, instance_queue):
"""Resets batch_wait_event in case the current batch is filled up.
This is a helper method that separates thread locked logic. Called from
_wait_for_batch_completion_if_needed() when a given instance update completes.
Resumes worker threads if all batch instances are updated.
Arguments:
instance_id -- Instance ID being processed.
instance_queue -- Instance update work queue.
"""
with self._thread_lock:
log.debug("Instance ID %s: Completion queue size %s" %
(instance_id, self._batch_completion_queue.qsize()))
log.debug("Instance ID %s: Instance queue size %s" %
(instance_id, instance_queue.qsize()))
self._batch_completion_queue.put(instance_id)
filled_up = self._batch_completion_queue.qsize() % self._update_config.batch_size == 0
all_done = instance_queue.qsize() == 0
if filled_up or all_done:
# Required batch size of completed instances has filled up -> unlock waiting threads.
log.debug('Instance %s completes the batch wait.' % instance_id)
self._batch_wait_event.set()
self._batch_wait_event.clear()
return True
return False
def _wait_for_batch_completion_if_needed(self, instance_id, instance_queue):
"""Waits for batch completion if wait_for_batch_completion flag is set.
Arguments:
instance_id -- Instance ID.
instance_queue -- Instance update work queue.
"""
if not self._update_config.wait_for_batch_completion:
return
if not self._try_reset_batch_wait_event(instance_id, instance_queue):
# The current batch has not filled up -> block the work thread.
log.debug('Instance %s is done. Waiting for batch to complete.' % instance_id)
self._batch_wait_event.wait()
def _terminate(self):
"""Attempts to terminate all outstanding activities."""
if not self._terminating:
log.info('Cleaning up')
self._terminating = True
self._scheduler.terminate()
self._job_monitor.terminate()
self._scheduler_mux.terminate()
self._watcher.terminate()
self._batch_wait_event.set()
def _update_instance(self, instance_queue):
"""Works through the instance_queue and performs instance updates (one at a time).
Arguments:
instance_queue -- Queue of InstanceData to update.
"""
while not self._terminating and not self._is_failed_update():
try:
instance_data = instance_queue.get_nowait()
except Empty:
return
update = True
restart = False
while update or restart and not self._terminating and not self._is_failed_update():
instances_to_watch = []
if update:
instances_to_watch += self._kill_and_add_instance(instance_data)
update = False
else:
instances_to_watch += self._request_restart_instance(instance_data)
if instances_to_watch:
failed_instances = self._watcher.watch(instances_to_watch)
restart = self._is_restart_needed(failed_instances)
self._wait_for_batch_completion_if_needed(instance_data.instance_id, instance_queue)
def _revert_instance(self, instance_queue):
"""Works through the instance_queue and performs instance rollbacks (one at a time).
Arguments:
instance_queue -- Queue of InstanceData to revert.
"""
while not self._terminating:
try:
instance_data = instance_queue.get_nowait()
except Empty:
return
log.info('Reverting instance: %s' % instance_data.instance_id)
instances_to_watch = self._kill_and_add_instance(instance_data)
if instances_to_watch and self._watcher.watch(instances_to_watch):
log.error('Rollback failed for instance: %s' % instance_data.instance_id)
def _kill_and_add_instance(self, instance_data):
"""Acquires update instructions and performs required kill/add/kill+add sequence.
Arguments:
instance_data -- InstanceData to update.
Returns added instance ID.
"""
log.info('Examining instance: %s' % instance_data.instance_id)
to_kill, to_add = self._create_kill_add_lists(
[instance_data.instance_id],
instance_data.operation_configs)
if not to_kill and not to_add:
log.info('Skipping unchanged instance: %s' % instance_data.instance_id)
return to_add
if to_kill:
self._request_kill_instance(instance_data)
if to_add:
self._request_add_instance(instance_data)
return to_add
def _request_kill_instance(self, instance_data):
"""Instructs the scheduler to kill instance and waits for completion.
Arguments:
instance_data -- InstanceData to kill.
"""
log.info('Killing instance: %s' % instance_data.instance_id)
self._enqueue_and_wait(instance_data, self._kill_instances)
result = self._job_monitor.wait_until(
JobMonitor.terminal,
[instance_data.instance_id],
with_timeout=True)
if not result:
raise self.Error('Instance %s was not killed in time' % instance_data.instance_id)
log.info('Killed: %s' % instance_data.instance_id)
def _request_add_instance(self, instance_data):
"""Instructs the scheduler to add instance.
Arguments:
instance_data -- InstanceData to add.
"""
log.info('Adding instance: %s' % instance_data.instance_id)
self._enqueue_and_wait(instance_data, self._add_instances)
log.info('Added: %s' % instance_data.instance_id)
def _request_restart_instance(self, instance_data):
"""Instructs the scheduler to restart instance.
Arguments:
instance_data -- InstanceData to restart.
Returns restarted instance ID.
"""
log.info('Restarting instance: %s' % instance_data.instance_id)
self._enqueue_and_wait(instance_data, self._restart_instances)
log.info('Restarted: %s' % instance_data.instance_id)
return [instance_data.instance_id]
def _enqueue_and_wait(self, instance_data, command):
"""Queues up the scheduler call and waits for completion.
Arguments:
instance_data -- InstanceData to query scheduler for.
command -- scheduler command to run.
"""
try:
self._scheduler_mux.enqueue_and_wait(
command,
instance_data,
timeout=self._rpc_completion_timeout)
except SchedulerMux.Error as e:
raise self.Error('Failed to complete instance %s operation. Reason: %s'
% (instance_data.instance_id, e))
def _is_failed_update(self, quiet=True):
"""Verifies the update status in a thread-safe manner.
Arguments:
quiet -- Whether the logging should be suppressed in case of a failed update. Default True.
Returns True if update failed, False otherwise.
"""
with self._thread_lock:
return self.failure_threshold.is_failed_update(log_errors=not quiet)
def _is_restart_needed(self, failed_instances):
"""Checks if there are any failed instances recoverable via restart.
Arguments:
failed_instances -- Failed instance IDs.
Returns True if restart is allowed, False otherwise (i.e. update failed).
"""
if not failed_instances:
return False
log.info('Failed instances: %s' % failed_instances)
with self._thread_lock:
unretryable_instances = self.failure_threshold.update_failure_counts(failed_instances)
if unretryable_instances:
log.warn('Not restarting failed instances %s, which exceeded '
'maximum allowed instance failure limit of %s' %
(unretryable_instances, self._update_config.max_per_instance_failures))
            return not unretryable_instances
def _get_rollback_ids(self, update_list, update_queue):
"""Gets a list of instance ids to rollback.
Arguments:
update_list -- original list of instances intended for update.
update_queue -- untouched instances not processed during update.
Returns sorted list of instance IDs to rollback.
"""
untouched_ids = []
while not update_queue.empty():
untouched_ids.append(update_queue.get_nowait().instance_id)
return sorted(list(set(update_list) - set(untouched_ids)), reverse=True)
def _hashable(self, element):
if isinstance(element, (list, set)):
return tuple(sorted(self._hashable(item) for item in element))
elif isinstance(element, dict):
return tuple(
sorted((self._hashable(key), self._hashable(value)) for (key, value) in element.items())
)
return element
def _thrift_to_json(self, config):
return json.loads(
serialize(config, protocol_factory=TJSONProtocol.TSimpleJSONProtocolFactory()))
def _diff_configs(self, from_config, to_config):
# Thrift objects do not correctly compare against each other due to the unhashable nature
# of python sets. That results in occasional diff failures with the following symptoms:
# - Sets are not equal even though their reprs are identical;
# - Items are reordered within thrift structs;
# - Items are reordered within sets;
# To overcome all the above, thrift objects are converted into JSON dicts to flatten out
# thrift type hierarchy. Next, JSONs are recursively converted into nested tuples to
# ensure proper ordering on compare.
return ''.join(unified_diff(repr(self._hashable(self._thrift_to_json(from_config))),
repr(self._hashable(self._thrift_to_json(to_config)))))
def _create_kill_add_lists(self, instance_ids, operation_configs):
"""Determines a particular action (kill or add) to use for every instance in instance_ids.
Arguments:
instance_ids -- current batch of IDs to process.
operation_configs -- OperationConfigs with update details.
Returns lists of instances to kill and to add.
"""
to_kill = []
to_add = []
for instance_id in instance_ids:
from_config = operation_configs.from_config.get(instance_id)
to_config = operation_configs.to_config.get(instance_id)
if from_config and to_config:
diff_output = self._diff_configs(from_config, to_config)
if diff_output:
log.debug('Task configuration changed for instance [%s]:\n%s'
% (instance_id, diff_output))
to_kill.append(instance_id)
to_add.append(instance_id)
elif from_config and not to_config:
to_kill.append(instance_id)
elif not from_config and to_config:
to_add.append(instance_id)
else:
raise self.Error('Instance %s is outside of supported range' % instance_id)
return to_kill, to_add
def _kill_instances(self, instance_data):
"""Instructs the scheduler to batch-kill instances and waits for completion.
Arguments:
instance_data -- list of InstanceData to kill.
"""
instance_ids = [data.instance_id for data in instance_data]
log.debug('Batch killing instances: %s' % instance_ids)
query = self._create_task_query(instanceIds=frozenset(int(s) for s in instance_ids))
self._check_and_log_response(self._scheduler.killTasks(query, self._lock))
log.debug('Done batch killing instances: %s' % instance_ids)
def _add_instances(self, instance_data):
"""Instructs the scheduler to batch-add instances.
Arguments:
instance_data -- list of InstanceData to add.
"""
instance_ids = [data.instance_id for data in instance_data]
to_config = instance_data[0].operation_configs.to_config
log.debug('Batch adding instances: %s' % instance_ids)
add_config = AddInstancesConfig(
key=self._job_key,
taskConfig=to_config[instance_ids[0]], # instance_ids will always have at least 1 item.
instanceIds=frozenset(int(s) for s in instance_ids))
self._check_and_log_response(self._scheduler.addInstances(add_config, self._lock))
log.debug('Done batch adding instances: %s' % instance_ids)
def _restart_instances(self, instance_data):
"""Instructs the scheduler to batch-restart instances.
Arguments:
instance_data -- list of InstanceData to restart.
"""
instance_ids = [data.instance_id for data in instance_data]
log.debug('Batch restarting instances: %s' % instance_ids)
resp = self._scheduler.restartShards(self._job_key, instance_ids, self._lock)
self._check_and_log_response(resp)
log.debug('Done batch restarting instances: %s' % instance_ids)
def _validate_quota(self, instance_configs):
"""Validates job update will not exceed quota for production tasks.
Arguments:
instance_configs -- InstanceConfig with update details.
Returns Response.OK if quota check was successful.
"""
instance_operation = self.OPERATION_CONFIGS(
from_config=instance_configs.remote_config_map,
to_config=instance_configs.local_config_map
)
def _aggregate_quota(ops_list, config_map):
request = CapacityRequest()
for instance in ops_list:
task = config_map[instance]
if task.production:
request += CapacityRequest.from_task(task)
return request
to_kill, to_add = self._create_kill_add_lists(
instance_configs.instances_to_process,
instance_operation)
return self._quota_check.validate_quota_from_requested(
self._job_key,
self._config.job().taskConfig.production,
_aggregate_quota(to_kill, instance_operation.from_config),
_aggregate_quota(to_add, instance_operation.to_config))
def _get_update_instructions(self, instances=None):
"""Loads, validates and populates update working set.
Arguments:
instances -- (optional) set of instances to update.
Returns:
InstanceConfigs with the following data:
remote_config_map -- dictionary of {key:instance_id, value:task_config} from scheduler.
local_config_map -- dictionary of {key:instance_id, value:task_config} with local
task configs validated and populated with default values.
instances_to_process -- list of instance IDs to go over in update.
"""
# Load existing tasks and populate remote config map and instance list.
assigned_tasks = self._get_existing_tasks()
remote_config_map = {}
remote_instances = []
for assigned_task in assigned_tasks:
remote_config_map[assigned_task.instanceId] = assigned_task.task
remote_instances.append(assigned_task.instanceId)
# Validate local job config and populate local task config.
local_task_config = self._validate_and_populate_local_config()
# Union of local and remote instance IDs.
job_config_instances = list(range(self._config.instances()))
instance_superset = sorted(list(set(remote_instances) | set(job_config_instances)))
# Calculate the update working set.
if instances is None:
# Full job update -> union of remote and local instances
instances_to_process = instance_superset
else:
# Partial job update -> validate all instances are recognized
instances_to_process = instances
unrecognized = list(set(instances) - set(instance_superset))
if unrecognized:
raise self.Error('Instances %s are outside of supported range' % unrecognized)
# Populate local config map
local_config_map = dict.fromkeys(job_config_instances, local_task_config)
return self.INSTANCE_CONFIGS(remote_config_map, local_config_map, instances_to_process)
def _get_existing_tasks(self):
"""Loads all existing tasks from the scheduler.
Returns a list of AssignedTasks.
"""
resp = self._scheduler.getTasksStatus(self._create_task_query())
self._check_and_log_response(resp)
return [t.assignedTask for t in resp.result.scheduleStatusResult.tasks]
def _validate_and_populate_local_config(self):
"""Validates local job configuration and populates local task config with default values.
Returns a TaskConfig populated with default values.
"""
resp = self._scheduler.populateJobConfig(self._config.job())
self._check_and_log_response(resp)
return resp.result.populateJobResult.taskConfig
def _replace_template_if_cron(self):
"""Checks if the provided job config represents a cron job and if so, replaces it.
Returns True if job is cron and False otherwise.
"""
if self._config.job().cronSchedule:
resp = self._scheduler.replaceCronTemplate(self._config.job(), self._lock)
self._check_and_log_response(resp)
return True
else:
return False
def _create_task_query(self, instanceIds=None):
return TaskQuery(jobKeys=[self._job_key], statuses=ACTIVE_STATES, instanceIds=instanceIds)
def _failed_response(self, message):
# TODO(wfarner): Avoid synthesizing scheduler responses, consider using an exception instead.
return Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message=message)])
def update(self, instances=None):
"""Performs the job update, blocking until it completes.
A rollback will be performed if the update was considered a failure based on the
update configuration.
Arguments:
instances -- (optional) instances to update. If not specified, all instances will be updated.
Returns a response object with update result status.
"""
try:
resp = self._start()
if resp.responseCode != ResponseCode.OK:
return resp
try:
# Handle cron jobs separately from other jobs.
if self._replace_template_if_cron():
log.info('Cron template updated, next run will reflect changes')
return self._finish()
else:
try:
instance_configs = self._get_update_instructions(instances)
self._check_and_log_response(self._validate_quota(instance_configs))
except self.Error as e:
# Safe to release the lock acquired above as no job mutation has happened yet.
self._finish()
return self._failed_response('Unable to start job update: %s' % e)
if not self._update(instance_configs):
log.warn('Update failures threshold reached')
self._finish()
return self._failed_response('Update reverted')
else:
log.info('Update successful')
return self._finish()
except (self.Error, ExecutionError, Exception) as e:
return self._failed_response('Aborting update without rollback! Fatal error: %s' % e)
finally:
self._scheduler_mux.terminate()
@classmethod
def cancel_update(cls, scheduler, job_key):
"""Cancels an update process by removing an exclusive lock on a provided job.
Arguments:
scheduler -- scheduler instance to use.
job_key -- job key to cancel update for.
Returns a response object with cancel update result status.
"""
return scheduler.releaseLock(
Lock(key=LockKey(job=job_key.to_thrift())),
LockValidation.UNCHECKED)
def _check_and_log_response(self, resp):
"""Checks scheduler return status, raises Error in case of unexpected response.
Arguments:
resp -- scheduler response object.
Raises Error in case of unexpected response status.
"""
message = format_response(resp)
if resp.responseCode == ResponseCode.OK:
log.debug(message)
else:
raise self.Error(message)
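# --- Hedged illustration (not part of the original module) ---------------------------
# A pure-python sketch of the per-instance decision made in _create_kill_add_lists above:
# an instance present on both sides is killed and re-added only when its config changed,
# one present only remotely is killed (shrinking the job), and one present only locally is
# added (growing the job). The config maps are hypothetical stand-ins for thrift TaskConfig
# objects, compared here with plain equality instead of the JSON/tuple diff used above, and
# unlike the real method this sketch skips (rather than raises on) unknown instance ids.
def _kill_add_decision_sketch(instance_ids, remote_config_map, local_config_map):
    to_kill, to_add = [], []
    for instance_id in instance_ids:
        remote = remote_config_map.get(instance_id)
        local = local_config_map.get(instance_id)
        if remote is not None and local is not None:
            if remote != local:
                to_kill.append(instance_id)
                to_add.append(instance_id)
        elif remote is not None:
            to_kill.append(instance_id)
        elif local is not None:
            to_add.append(instance_id)
    return to_kill, to_add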
|
|
"""RFC 6962 client API."""
import base64
import json
from ct.client.db import database
from ct.crypto import verify
from ct.proto import client_pb2
import gflags
import logging
import random
import requests
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor as ireactor
from twisted.internet import task
from twisted.internet import threads
from twisted.python import failure
from twisted.web import client
from twisted.web import http
from twisted.web import iweb
from Queue import Queue
from zope.interface import implements
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("entry_fetch_batch_size", 1000, "Maximum number of "
"entries to attempt to fetch in one request.")
gflags.DEFINE_integer("max_fetchers_in_parallel", 100, "Maximum number of "
"concurrent fetches.")
gflags.DEFINE_integer("get_entries_retry_delay", 1, "Number of seconds after "
"which get-entries will be retried if it encountered "
"an error.")
gflags.DEFINE_integer("get_entries_max_retries", 10, "Number of retries after "
"which get-entries simply fails.")
gflags.DEFINE_integer("entries_buffer", 100000, "Size of buffer which stores "
"fetched entries before async log client is able to "
"return them. 100000 entries shouldn't take more "
"than 600 Mb of memory.")
gflags.DEFINE_integer("response_buffer_size_bytes", 50 * 1000 * 1000, "Maximum "
                      "size of a single response buffer. Should be set such "
                      "that a get_entries response comfortably fits in "
                      "the buffer. A typical log entry is expected to be < "
                      "10kB.")
gflags.DEFINE_bool("persist_entries", True, "Cache entries on disk.")
class Error(Exception):
pass
class ClientError(Error):
pass
class HTTPError(Error):
"""Connection failed, or returned an error."""
pass
class HTTPConnectionError(HTTPError):
"""Connection failed."""
pass
class HTTPResponseSizeExceededError(HTTPError):
"""HTTP response exceeded maximum permitted size."""
pass
class HTTPClientError(HTTPError):
"""HTTP 4xx."""
pass
class HTTPServerError(HTTPError):
"""HTTP 5xx."""
pass
class InvalidRequestError(Error):
"""Request does not comply with the CT protocol."""
pass
class InvalidResponseError(Error):
"""Response does not comply with the CT protocol."""
pass
###############################################################################
# Common utility methods and constants. #
###############################################################################
_GET_STH_PATH = "ct/v1/get-sth"
_GET_ENTRIES_PATH = "ct/v1/get-entries"
_GET_STH_CONSISTENCY_PATH = "ct/v1/get-sth-consistency"
_GET_PROOF_BY_HASH_PATH = "ct/v1/get-proof-by-hash"
_GET_ROOTS_PATH = "ct/v1/get-roots"
_GET_ENTRY_AND_PROOF_PATH = "ct/v1/get-entry-and-proof"
_ADD_CHAIN = "ct/v1/add-chain"
def _parse_sth(sth_body):
"""Parse a serialized STH JSON response."""
sth_response = client_pb2.SthResponse()
try:
sth = json.loads(sth_body)
sth_response.timestamp = sth["timestamp"]
sth_response.tree_size = sth["tree_size"]
sth_response.sha256_root_hash = base64.b64decode(sth[
"sha256_root_hash"])
sth_response.tree_head_signature = base64.b64decode(sth[
"tree_head_signature"])
# TypeError for base64 decoding, TypeError/ValueError for invalid
# JSON field types, KeyError for missing JSON fields.
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError("Invalid STH %s\n%s" % (sth_body, e))
return sth_response
def _parse_entry(json_entry):
"""Convert a json array element to an EntryResponse."""
entry_response = client_pb2.EntryResponse()
try:
entry_response.leaf_input = base64.b64decode(
json_entry["leaf_input"])
entry_response.extra_data = base64.b64decode(
json_entry["extra_data"])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError("Invalid entry: %s\n%s" % (json_entry, e))
return entry_response
def _parse_entries(entries_body, expected_response_size):
"""Load serialized JSON response.
Args:
entries_body: received entries.
expected_response_size: number of entries requested. Used to validate
the response.
Returns:
a list of client_pb2.EntryResponse entries.
Raises:
InvalidResponseError: response not valid.
"""
try:
response = json.loads(entries_body)
except ValueError as e:
raise InvalidResponseError("Invalid response %s\n%s" %
(entries_body, e))
try:
entries = iter(response["entries"])
except (TypeError, KeyError) as e:
        raise InvalidResponseError("Invalid response: expected "
                                   "an array of entries, got %s\n%s" %
(response, e))
# Logs MAY honor requests where 0 <= "start" < "tree_size" and
# "end" >= "tree_size" by returning a partial response covering only
# the valid entries in the specified range.
# Logs MAY restrict the number of entries that can be retrieved per
# "get-entries" request. If a client requests more than the
# permitted number of entries, the log SHALL return the maximum
# number of entries permissible. (RFC 6962)
#
# Therefore, we cannot assume we get exactly the expected number of
# entries. However if we get none, or get more than expected, then
# we discard the response and raise.
response_size = len(response["entries"])
if not response_size or response_size > expected_response_size:
        raise InvalidResponseError("Invalid response: requested %d entries, "
                                   "got %d entries" %
(expected_response_size, response_size))
return [_parse_entry(e) for e in entries]
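# Hedged sketch (illustration only, not part of the original module): the paging behaviour
# that LogClient.get_entries below builds on top of _parse_entries. Per RFC 6962 a log may
# return fewer entries than requested, so the caller keeps advancing `start` by however
# many entries it actually received. `fetch` is a hypothetical callable that returns a
# non-empty list of entries for an inclusive [first, last] range.
def _example_entry_paging(fetch, start, end, batch_size):
    entries = []
    while start <= end:
        batch = fetch(start, min(start + batch_size - 1, end))
        entries.extend(batch)
        start += len(batch)
    return entries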
def _parse_consistency_proof(response, servername):
try:
response = json.loads(response)
consistency = [base64.b64decode(u) for u in response["consistency"]]
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected a base64-encoded "
"consistency proof, got %s"
"\n%s" % (servername, response, e))
return consistency
# A class that we can mock out to generate fake responses.
class RequestHandler(object):
"""HTTPS requests."""
def __init__(self, connection_timeout=60, ca_bundle=True):
self._timeout = connection_timeout
self._ca_bundle = ca_bundle
def __repr__(self):
return "%r()" % self.__class__.__name__
def __str__(self):
return "%r()" % self.__class__.__name__
def get_response(self, uri, params=None):
"""Get an HTTP response for a GET request."""
try:
return requests.get(uri, params=params, timeout=self._timeout,
verify=self._ca_bundle)
except requests.exceptions.RequestException as e:
raise HTTPError("Connection to %s failed: %s" % (uri, e))
def post_response(self, uri, post_data):
try:
return requests.post(uri, data=json.dumps(post_data),
timeout=self._timeout, verify=self._ca_bundle)
except requests.exceptions.RequestException as e:
raise HTTPError("POST to %s failed: %s" % (uri, e))
@staticmethod
def check_response_status(code, reason, content='', headers=''):
if code == 200:
return
elif 400 <= code < 500:
raise HTTPClientError("%s (%s) %s" % (reason, content, headers))
elif 500 <= code < 600:
raise HTTPServerError("%s (%s) %s" % (reason, content, headers))
else:
raise HTTPError("%s (%s) %s" % (reason, content, headers))
def get_response_body(self, uri, params=None):
response = self.get_response(uri, params=params)
self.check_response_status(response.status_code, response.reason,
response.content, response.headers)
return response.content
def post_response_body(self, uri, post_data=None):
response = self.post_response(uri, post_data=post_data)
self.check_response_status(response.status_code, response.reason,
response.content, response.headers)
return response.content
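# Hedged usage sketch (illustration only, not part of the original module): driving the
# RequestHandler above directly. The log URI is made up; per check_response_status, a
# non-200 reply surfaces as HTTPClientError (4xx), HTTPServerError (5xx) or HTTPError.
def _example_fetch_sth_body(log_uri="https://ct.example.com/log"):
    handler = RequestHandler(connection_timeout=30)
    return handler.get_response_body(log_uri + "/" + _GET_STH_PATH)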
###############################################################################
# The synchronous log client. #
###############################################################################
class LogClient(object):
    """HTTP client for talking to a CT log."""
    def __init__(self, uri, handler=None, connection_timeout=60,
                 ca_bundle=True):
        """Create a new log client.
        Args:
            uri: The CT Log URI to communicate with.
            handler: A custom RequestHandler to use. If not specified, a new one
                will be created.
            connection_timeout: Timeout (in seconds) for all GET and POST requests.
            ca_bundle: True or a file path containing a set of CA roots. See
                Requests documentation for more information:
                http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification
                Note that a false-y value is not allowed.
        """
        self._uri = uri
if not ca_bundle:
raise ClientError("Refusing to turn off SSL certificate checking.")
if handler:
self._req = handler
else:
self._req = RequestHandler(connection_timeout, ca_bundle)
def __repr__(self):
return "%r(%r)" % (self.__class__.__name__, self._req)
def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self._uri)
@property
def servername(self):
return self._uri
def _req_body(self, path, params=None):
return self._req.get_response_body(self._uri + "/" + path,
params=params)
def _post_req_body(self, path, post_data=None):
return self._req.post_response_body(
self._uri + "/" + path, post_data=post_data)
def _parse_sct(self, sct_response):
sct_data = json.loads(sct_response)
try:
sct = client_pb2.SignedCertificateTimestamp()
sct_version = sct_data["sct_version"]
if sct_version != 0:
raise InvalidResponseError(
"Unknown SCT version: %d" % sct_version)
sct.version = client_pb2.V1
sct.id.key_id = base64.b64decode(sct_data["id"])
sct.timestamp = sct_data["timestamp"]
hash_algorithm, sig_algorithm, sig_data = verify.decode_signature(
base64.b64decode(sct_data["signature"]))
sct.signature.hash_algorithm = hash_algorithm
sct.signature.sig_algorithm = sig_algorithm
sct.signature.signature = sig_data
return sct
except KeyError as e:
raise InvalidResponseError("SCT Missing field: %s" % e)
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
sth = self._req_body(_GET_STH_PATH)
return _parse_sth(sth)
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start : index of first entry to retrieve.
end : index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Yields:
ct.proto.client_pb2.EntryResponse protos.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
[start, end] is not a valid range for this log.
InvalidRequestError: invalid request range (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise InvalidRequestError("Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
while start <= end:
# Note that an HTTPError may occur here if the log does not have the
# requested range of entries available. RFC 6962 says:
# "Any errors will be returned as HTTP 4xx or 5xx responses, with
# human-readable error messages."
# There is thus no easy way to distinguish this case from other
# errors.
first = start
last = min(start + batch_size - 1, end)
response = self._req_body(_GET_ENTRIES_PATH,
params={"start": first, "end": last})
entries = _parse_entries(response, last - first + 1)
for entry in entries:
yield entry
            # If we got fewer entries than requested, then we don't know whether
# the log imposed a batch limit or ran out of entries, so we keep
# trying until we get all entries, or an error response.
start += len(entries)
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
list of raw hashes (bytes) forming the consistency proof
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
        if old_size > new_size:
            raise InvalidRequestError(
                "old > new: %s > %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
return []
response = self._req_body(_GET_STH_CONSISTENCY_PATH,
params={"first": old_size,
"second": new_size})
return _parse_consistency_proof(response, self.servername)
def get_proof_by_hash(self, leaf_hash, tree_size):
"""Retrieve an audit proof by leaf hash.
Args:
leaf_hash: hash of the leaf input (as raw binary string).
tree_size: size of the tree on which to base the proof.
Returns:
a client_pb2.ProofByHashResponse containing the leaf index
and the Merkle tree audit path nodes (as binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
HTTPClientError can happen when leaf_hash is not present in the
log tree of the given size.
InvalidRequestError: invalid request (irrespective of log).
InvalidResponseError: server response is invalid for the given
request.
"""
if tree_size <= 0:
raise InvalidRequestError("Tree size must be positive (got %d)" %
tree_size)
leaf_hash = base64.b64encode(leaf_hash)
response = self._req_body(_GET_PROOF_BY_HASH_PATH,
params={"hash": leaf_hash,
"tree_size": tree_size})
response = json.loads(response)
proof_response = client_pb2.ProofByHashResponse()
try:
proof_response.leaf_index = response["leaf_index"]
proof_response.audit_path.extend(
[base64.b64decode(u) for u in response["audit_path"]])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected a base64-encoded "
"audit proof, got %s"
"\n%s" % (self.servername, response, e))
return proof_response
def get_entry_and_proof(self, leaf_index, tree_size):
"""Retrieve an entry and its audit proof by index.
Args:
leaf_index: index of the entry.
tree_size: size of the tree on which to base the proof.
Returns:
a client_pb2.EntryAndProofResponse containing the entry
and the Merkle tree audit path nodes (as binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
HTTPClientError can happen when tree_size is not a valid size
for this log.
InvalidRequestError: invalid request (irrespective of log).
InvalidResponseError: server response is invalid for the given
request.
"""
if tree_size <= 0:
raise InvalidRequestError("Tree size must be positive (got %d)" %
tree_size)
if leaf_index < 0 or leaf_index >= tree_size:
            raise InvalidRequestError("Leaf index must be smaller than tree "
                                      "size (got index %d vs size %d)" %
(leaf_index, tree_size))
response = self._req_body(_GET_ENTRY_AND_PROOF_PATH,
params={"leaf_index": leaf_index,
"tree_size": tree_size})
response = json.loads(response)
entry_response = client_pb2.EntryAndProofResponse()
try:
entry_response.entry.CopyFrom(_parse_entry(response))
entry_response.audit_path.extend(
[base64.b64decode(u) for u in response["audit_path"]])
except (TypeError, ValueError, KeyError) as e:
raise InvalidResponseError(
"%s returned invalid data: expected an entry and proof, got %s"
"\n%s" % (self.servername, response, e))
return entry_response
def get_roots(self):
"""Retrieve currently accepted root certificates.
Returns:
a list of certificates (as raw binary strings).
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. For logs that honour HTTP status codes,
HTTPClientError (a 4xx) should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
response = self._req_body(_GET_ROOTS_PATH)
response = json.loads(response)
try:
            return [base64.b64decode(u) for u in response["certificates"]]
except (TypeError, ValueError, KeyError) as e:
            raise InvalidResponseError(
                "%s returned invalid data: expected a list of base64-encoded "
"certificates, got %s\n%s" % (self.servername, response, e))
def add_chain(self, certs_list):
"""Adds the given chain of certificates.
Args:
certs_list: A list of DER-encoded certificates to add.
Returns:
The SCT for the certificate.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
sct_data = self._post_req_body(
_ADD_CHAIN,
{'chain': [base64.b64encode(certificate) for certificate in certs_list]})
return self._parse_sct(sct_data)
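# Hedged usage sketch (illustration only, not part of the original module): typical use of
# the synchronous LogClient above. The log URI is a placeholder and the calls perform real
# network I/O, so the sketch is wrapped in a function rather than run at import time.
def _example_sync_client(log_uri="https://ct.example.com/log"):
    client = LogClient(log_uri)
    sth = client.get_sth()
    # get_entries() yields EntryResponse protos lazily, re-requesting transparently when
    # the log returns fewer entries per batch than asked for.
    first_ten = list(client.get_entries(0, min(9, sth.tree_size - 1)))
    return sth, first_ten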
###############################################################################
# The asynchronous twisted log client. #
###############################################################################
class ResponseBodyHandler(protocol.Protocol):
"""Response handler for HTTP requests."""
def __init__(self, finished):
"""Initialize the one-off response handler.
Args:
finished: a deferred that will be fired with the body when the
complete response has been received; or with an error when the
connection is lost.
"""
self._finished = finished
def connectionMade(self):
self._buffer = []
self._len = 0
self._overflow = False
def dataReceived(self, data):
self._len += len(data)
if self._len > FLAGS.response_buffer_size_bytes:
# Note this flag has to be set *before* calling loseConnection()
# to ensure connectionLost gets called with the flag set.
self._overflow = True
self.transport.loseConnection()
else:
self._buffer.append(data)
def connectionLost(self, reason):
if self._overflow:
self._finished.errback(HTTPResponseSizeExceededError(
"Connection aborted: response size exceeded %d bytes" %
FLAGS.response_buffer_size_bytes))
elif not reason.check(*(error.ConnectionDone, client.ResponseDone,
http.PotentialDataLoss)):
self._finished.errback(HTTPConnectionError(
"Connection lost (received %d bytes)" % self._len))
else:
body = "".join(self._buffer)
self._finished.callback(body)
class AsyncRequestHandler(object):
"""A helper for asynchronous response body delivery."""
def __init__(self, agent):
self._agent = agent
@staticmethod
def _response_cb(response):
try:
RequestHandler.check_response_status(response.code, response.phrase,
list(response.headers.getAllRawHeaders()))
except HTTPError as e:
return failure.Failure(e)
finished = defer.Deferred()
response.deliverBody(ResponseBodyHandler(finished))
return finished
@staticmethod
def _make_request(path, params):
if not params:
return path
return path + "?" + "&".join(["%s=%s" % (key, value)
for key, value in params.iteritems()])
def get(self, path, params=None):
d = self._agent.request("GET", self._make_request(path, params))
d.addCallback(self._response_cb)
return d
class EntryProducer(object):
"""A push producer for log entries."""
implements(iweb.IBodyProducer)
def __init__(self, handler, reactor, uri, start, end,
batch_size, entries_db=None):
self._handler = handler
self._reactor = reactor
self._uri = uri
self._entries_db = entries_db
self._consumer = None
assert 0 <= start <= end
self._start = start
self._end = end
self._current = self._start
self._batch_size = batch_size
self._batches = Queue()
self._currently_fetching = 0
self._currently_stored = 0
self._last_fetching = self._current
self._max_currently_fetching = (FLAGS.max_fetchers_in_parallel *
self._batch_size)
# Required attribute of the interface.
self.length = iweb.UNKNOWN_LENGTH
self.min_delay = FLAGS.get_entries_retry_delay
@property
def finished(self):
return self._current > self._end
def __fail(self, failure):
if not self._stopped:
self.stopProducing()
self._done.errback(failure)
@staticmethod
def _calculate_retry_delay(retries):
"""Calculates delay based on number of retries which already happened.
Random is there, so we won't attack server lots of requests exactly
at the same time, and 1.3 is nice constant for exponential back-off."""
return ((0.4 + random.uniform(0.3, 0.6)) * FLAGS.get_entries_retry_delay
* 1.4**retries)
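    # Worked example (approximate): with delay = FLAGS.get_entries_retry_delay,
    # retries=0 sleeps for roughly delay * (0.7..1.0); retries=1 multiplies
    # that by 1.4, retries=2 by 1.96, and so on, growing geometrically with
    # random jitter on every attempt.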
def _response_eb(self, failure, first, last, retries):
"""Error back for HTTP errors"""
if not self._paused:
# if it's not last retry and failure wasn't our fault we retry
if (retries < FLAGS.get_entries_max_retries and
not failure.check(HTTPClientError)):
logging.info("Retrying get-entries for range <%d, %d> retry: %d"
% (first, last, retries))
d = task.deferLater(self._reactor,
self._calculate_retry_delay(retries),
self._fetch_parsed_entries,
first, last)
d.addErrback(self._response_eb, first, last, retries + 1)
return d
else:
self.__fail(failure)
def _fetch_eb(self, failure):
"""Error back for errors after getting result of a request
(InvalidResponse)"""
self.__fail(failure)
def _write_pending(self):
d = defer.Deferred()
d.callback(None)
if self._pending:
self._current += len(self._pending)
self._currently_stored -= len(self._pending)
d = self._consumer.consume(self._pending)
self._pending = None
return d
def _batch_completed(self, result):
self._currently_fetching -= len(result)
self._currently_stored += len(result)
return result
def _store_batch(self, entry_batch, start_index):
assert self._entries_db
d = threads.deferToThread(self._entries_db.store_entries,
enumerate(entry_batch, start_index))
d.addCallback(lambda _: entry_batch)
return d
def _get_entries_from_db(self, first, last):
if FLAGS.persist_entries and self._entries_db:
d = threads.deferToThread(self._entries_db.scan_entries, first, last)
d.addCallbacks(lambda entries: list(entries))
d.addErrback(lambda fail: fail.trap(database.KeyError) and None)
return d
else:
d = defer.Deferred()
d.callback(None)
return d
def _fetch_parsed_entries(self, first, last):
# first check in database
d = self._get_entries_from_db(first, last)
d.addCallback(self._sub_fetch_parsed_entries, first, last)
return d
def _sub_fetch_parsed_entries(self, entries, first, last):
        # Hitting the server with many requests at exactly the same time is
        # not a great idea, so each request is sent after a slight delay.
if not entries:
request = task.deferLater(self._reactor,
self._calculate_retry_delay(0),
self._handler.get,
self._uri + "/" + _GET_ENTRIES_PATH,
params={"start": str(first),
"end": str(last)})
request.addCallback(_parse_entries, last - first + 1)
if self._entries_db and FLAGS.persist_entries:
request.addCallback(self._store_batch, first)
entries = request
else:
deferred_entries = defer.Deferred()
deferred_entries.callback(entries)
entries = deferred_entries
return entries
def _create_next_request(self, first, last, entries, retries):
d = self._fetch_parsed_entries(first, last)
d.addErrback(self._response_eb, first, last, retries)
d.addCallback(lambda result: (entries + result, len(result)))
d.addCallback(self._fetch, first, last, retries)
return d
def _fetch(self, result, first, last, retries):
entries, last_fetched_entries_count = result
next_range_start = first + last_fetched_entries_count
if next_range_start > last:
return entries
return self._create_next_request(next_range_start, last,
entries, retries)
def _create_fetch_deferred(self, first, last, retries=0):
d = defer.Deferred()
d.addCallback(self._fetch, first, last, retries)
d.addCallback(self._batch_completed)
d.addErrback(self._fetch_eb)
d.callback(([], 0))
return d
@defer.deferredGenerator
def produce(self):
"""Produce entries."""
while not self._paused:
wfd = defer.waitForDeferred(self._write_pending())
yield wfd
wfd.getResult()
if self.finished:
self.finishProducing()
return
first = self._last_fetching
while (self._currently_fetching <= self._max_currently_fetching and
self._last_fetching <= self._end and
self._currently_stored <= FLAGS.entries_buffer):
last = min(self._last_fetching + self._batch_size - 1, self._end,
self._last_fetching + self._max_currently_fetching
- self._currently_fetching + 1)
self._batches.put(self._create_fetch_deferred(first, last))
self._currently_fetching += last - first + 1
first = last + 1
self._last_fetching = first
wfd = defer.waitForDeferred(self._batches.get())
# Pause here until the body of the response is available.
yield wfd
# The producer may have been paused while waiting for the response,
# or errored out upon receiving it: do not write the entries out
# until after the next self._paused check.
self._pending = wfd.getResult()
def startProducing(self, consumer):
"""Start producing entries.
The producer writes EntryResponse protos to the consumer in batches,
until all entries have been received, or an error occurs.
Args:
consumer: the consumer to write to.
Returns:
a deferred that fires when no more entries will be written.
            Upon success, this deferred fires with the number of produced
            entries, or with None if production was not successful. Upon
            failure, it fires with the appropriate HTTPError.
Raises:
RuntimeError: consumer already registered.
"""
if self._consumer:
raise RuntimeError("Producer already has a consumer registered")
self._consumer = consumer
self._stopped = False
self._paused = True
self._pending = None
self._done = defer.Deferred()
# An IBodyProducer should start producing immediately, without waiting
# for an explicit resumeProducing() call.
task.deferLater(self._reactor, 0, self.resumeProducing)
return self._done
def pauseProducing(self):
self._paused = True
def resumeProducing(self):
if self._paused and not self._stopped:
self._paused = False
self.produce()
def stopProducing(self):
self._paused = True
self._stopped = True
def finishProducing(self, success=True):
self.stopProducing()
if success:
self._done.callback(self._end - self._start + 1)
else:
self._done.callback(None)
class AsyncLogClient(object):
"""A twisted log client."""
def __init__(self, agent, uri, entries_db=None, reactor=ireactor):
"""Initialize the client.
        If entries_db is specified and the persist_entries flag is true,
        get_entries will return stored entries.
Args:
agent: the agent to use.
uri: the uri of the log.
            entries_db: an object that conforms to the TempDB API.
reactor: the reactor to use. Default is twisted.internet.reactor.
"""
self._handler = AsyncRequestHandler(agent)
        # Twisted expects bytes, so if the uri is unicode we have to encode it.
self._uri = uri.encode('ascii')
self._reactor = reactor
self._entries_db = entries_db
@property
def servername(self):
return self._uri
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a Deferred that fires with a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPConnectionError, HTTPClientError,
HTTPResponseSizeExceededError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
deferred_result = self._handler.get(self._uri + "/" + _GET_STH_PATH)
deferred_result.addCallback(_parse_sth)
return deferred_result
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start: index of first entry to retrieve.
end: index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Returns:
an EntryProducer for the given range.
Raises:
InvalidRequestError: invalid request range (irrespective of log).
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur
during production.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise InvalidRequestError("Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
return EntryProducer(self._handler, self._reactor, self._uri,
start, end, batch_size, self._entries_db)
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
            a Deferred that fires with a list of raw hashes (bytes) forming
            the consistency proof.
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
if old_size > new_size:
raise InvalidRequestError(
"old > new: %s >= %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
d = defer.Deferred()
d.callback([])
return d
deferred_response = self._handler.get(self._uri + "/" +
_GET_STH_CONSISTENCY_PATH,
params={"first": old_size,
"second": new_size})
deferred_response.addCallback(_parse_consistency_proof, self.servername)
return deferred_response
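# Usage sketch (illustrative only, not part of the original client): fetch the
# current STH from a log and stop the reactor once it arrives. The Agent
# construction, the log URI and the `tree_size` field access are assumptions
# made for this example.
def _example_get_sth(reactor):
    from twisted.web.client import Agent
    client = AsyncLogClient(Agent(reactor), "https://ct.example.com/ct/v1")
    d = client.get_sth()
    d.addCallback(lambda sth: logging.info("got STH with tree_size %d",
                                           sth.tree_size))
    d.addErrback(lambda err: logging.error("get-sth failed: %s", err))
    d.addBoth(lambda _: reactor.stop())
    return d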
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import NotFittedError, check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of the expectation of the log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in the E-step.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None will initialize the
        document topic distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
        `doc_topic_distr` is the unnormalized topic distribution for each
        document. In the literature, this is `gamma`. We can calculate
        `E[log(theta)]` from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # Sufficient statistics for `components_` (only calculated when
    # `cal_sstats` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
        Method used to update `components_`. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        A parameter that controls the learning rate in the online learning
        method. The value should be set in (0.5, 1.0] to guarantee
        asymptotic convergence. When the value is 0.0 and batch_size is
        ``n_samples``, the update method is the same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=0)
        How often to evaluate perplexity. Only used in the `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
            Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
            `doc_topic_distr` is the unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
        Update `components_` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
            Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
        # update `components_`-related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
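    # Worked example (with the defaults learning_offset=10. and
    # learning_decay=0.7): the first online update uses
    # rho = (10 + 1) ** -0.7 ~= 0.19, so `components_` keeps ~81% of its old
    # value and takes ~19% from the statistics of the new mini-batch.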
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
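# Usage sketch (illustrative, not part of the original module): fit the
# estimator on a tiny document-term count matrix. The matrix values and the
# hyper-parameters below are arbitrary choices made for this example.
def _example_lda_usage():
    X = np.array([[2, 0, 1, 0],
                  [0, 3, 0, 1],
                  [1, 1, 2, 0]])
    lda = LatentDirichletAllocation(n_topics=2, learning_method='batch',
                                    max_iter=5, random_state=0)
    gamma = lda.fit(X).transform(X)                  # unnormalized doc-topic matrix
    return gamma / gamma.sum(axis=1)[:, np.newaxis]  # per-document topic proportions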
|
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# Default timezone for clear logging
TIME_ZONE = 'UTC'
ENV_NAME = os.environ.get("ENV_NAME", "fuel_system_test")
ISO_PATH = os.environ.get('ISO_PATH')
DNS = os.environ.get('DNS', '8.8.8.8')
OPENSTACK_RELEASE_CENTOS = 'centos'
OPENSTACK_RELEASE_UBUNTU = 'ubuntu'
OPENSTACK_RELEASE_REDHAT = 'rhos 3.0 for rhel 6.4'
OPENSTACK_RELEASE = os.environ.get(
'OPENSTACK_RELEASE', OPENSTACK_RELEASE_CENTOS).lower()
REDHAT_LICENSE_TYPE = os.environ.get('REDHAT_LICENSE_TYPE')
REDHAT_USERNAME = os.environ.get('REDHAT_USERNAME')
REDHAT_PASSWORD = os.environ.get('REDHAT_PASSWORD')
REDHAT_SATELLITE_HOST = os.environ.get('REDHAT_SATELLITE_HOST')
REDHAT_ACTIVATION_KEY = os.environ.get('REDHAT_ACTIVATION_KEY')
DEPLOYMENT_MODE_SIMPLE = "multinode"
DEPLOYMENT_MODE_HA = "ha_compact"
DEPLOYMENT_MODE = os.environ.get("DEPLOYMENT_MODE", DEPLOYMENT_MODE_HA)
ADMIN_NODE_SETUP_TIMEOUT = os.environ.get("ADMIN_NODE_SETUP_TIMEOUT", 30)
PUPPET_TIMEOUT = os.environ.get("PUPPET_TIMEOUT", 6000)
HARDWARE = {
"admin_node_memory": os.environ.get("ADMIN_NODE_MEMORY", 2048),
"admin_node_cpu": os.environ.get("ADMIN_NODE_CPU", 2),
"slave_node_cpu": os.environ.get("SLAVE_NODE_CPU", 1),
}
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
slave_mem_default = 2560
else:
slave_mem_default = 2048
HARDWARE["slave_node_memory"] = int(
os.environ.get("SLAVE_NODE_MEMORY", slave_mem_default))
NODE_VOLUME_SIZE = int(os.environ.get('NODE_VOLUME_SIZE', 50))
NODES_COUNT = os.environ.get('NODES_COUNT', 10)
MULTIPLE_NETWORKS = os.environ.get('MULTIPLE_NETWORKS', False) == 'true'
if MULTIPLE_NETWORKS:
NODEGROUPS = (
{
'name': 'default',
'pools': ['admin', 'public', 'management', 'private',
'storage']
},
{
'name': 'group-custom-1',
'pools': ['admin2', 'public2', 'management2', 'private2',
'storage2']
}
)
FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', 'route')
ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', 'nat')
PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', 'nat')
else:
NODEGROUPS = {}
FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', None)
ADMIN_FORWARD = os.environ.get('ADMIN_FORWARD', FORWARD_DEFAULT or 'nat')
PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', FORWARD_DEFAULT or 'nat')
MGMT_FORWARD = os.environ.get('MGMT_FORWARD', FORWARD_DEFAULT)
PRIVATE_FORWARD = os.environ.get('PRIVATE_FORWARD', FORWARD_DEFAULT)
STORAGE_FORWARD = os.environ.get('STORAGE_FORWARD', FORWARD_DEFAULT)
DEFAULT_INTERFACE_ORDER = 'admin,public,management,private,storage'
INTERFACE_ORDER = os.environ.get('INTERFACE_ORDER',
DEFAULT_INTERFACE_ORDER).split(',')
FORWARDING = {
'admin': ADMIN_FORWARD,
'public': PUBLIC_FORWARD,
'management': MGMT_FORWARD,
'private': PRIVATE_FORWARD,
'storage': STORAGE_FORWARD,
}
DHCP = {
'admin': False,
'public': False,
'management': False,
'private': False,
'storage': False,
}
INTERFACES = {
'admin': 'eth0',
'public': 'eth1',
'management': 'eth2',
'private': 'eth3',
'storage': 'eth4',
}
# May be one of virtio, e1000, pcnet, rtl8139
INTERFACE_MODEL = os.environ.get('INTERFACE_MODEL', 'virtio')
POOL_DEFAULT = os.environ.get('POOL_DEFAULT', '10.108.0.0/16:24')
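# Illustrative note (assumes the default value above is kept): each pool is a
# "<network CIDR>:<prefix>" string, so '10.108.0.0/16:24'.split(':') yields
# ['10.108.0.0/16', '24'], i.e. the address space for the environment and the
# prefix length carved out of it per network by the .split(':') calls below.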
POOL_ADMIN = os.environ.get('POOL_ADMIN', POOL_DEFAULT)
POOL_PUBLIC = os.environ.get('POOL_PUBLIC', POOL_DEFAULT)
POOL_MANAGEMENT = os.environ.get('POOL_MANAGEMENT', POOL_DEFAULT)
POOL_PRIVATE = os.environ.get('POOL_PRIVATE', POOL_DEFAULT)
POOL_STORAGE = os.environ.get('POOL_STORAGE', POOL_DEFAULT)
DEFAULT_POOLS = {
'admin': POOL_ADMIN,
'public': POOL_PUBLIC,
'management': POOL_MANAGEMENT,
'private': POOL_PRIVATE,
'storage': POOL_STORAGE,
}
POOLS = {
'admin': os.environ.get(
'PUBLIC_POOL',
DEFAULT_POOLS.get('admin')).split(':'),
'public': os.environ.get(
'PUBLIC_POOL',
DEFAULT_POOLS.get('public')).split(':'),
'management': os.environ.get(
'PRIVATE_POOL',
DEFAULT_POOLS.get('management')).split(':'),
'private': os.environ.get(
'INTERNAL_POOL',
DEFAULT_POOLS.get('private')).split(':'),
'storage': os.environ.get(
'NAT_POOL',
DEFAULT_POOLS.get('storage')).split(':'),
}
if MULTIPLE_NETWORKS:
FORWARDING['admin2'] = ADMIN_FORWARD
FORWARDING['public2'] = PUBLIC_FORWARD
FORWARDING['management2'] = MGMT_FORWARD
FORWARDING['private2'] = PRIVATE_FORWARD
FORWARDING['storage2'] = STORAGE_FORWARD
DHCP['admin2'] = False
DHCP['public2'] = False
DHCP['management2'] = False
DHCP['private2'] = False
DHCP['storage2'] = False
INTERFACES['admin2'] = 'eth5'
POOL_DEFAULT2 = os.environ.get('POOL_DEFAULT2', '10.108.0.0/16:24')
POOL_ADMIN2 = os.environ.get('POOL_ADMIN2', POOL_DEFAULT2)
POOL_PUBLIC2 = os.environ.get('POOL_PUBLIC2', POOL_DEFAULT2)
POOL_MANAGEMENT2 = os.environ.get('POOL_MANAGEMENT', POOL_DEFAULT2)
POOL_PRIVATE2 = os.environ.get('POOL_PRIVATE', POOL_DEFAULT2)
POOL_STORAGE2 = os.environ.get('POOL_STORAGE', POOL_DEFAULT2)
CUSTOM_POOLS = {
'admin2': POOL_ADMIN2,
'public2': POOL_PUBLIC2,
'management2': POOL_MANAGEMENT2,
'private2': POOL_PRIVATE2,
'storage2': POOL_STORAGE2,
}
POOLS['admin2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('admin2')).split(':')
POOLS['public2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('public2')).split(':')
POOLS['management2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('management2')).split(':')
POOLS['private2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('private2')).split(':')
POOLS['storage2'] = os.environ.get(
'PUBLIC_POOL2',
CUSTOM_POOLS.get('storage2')).split(':')
CUSTOM_INTERFACE_ORDER = os.environ.get(
'CUSTOM_INTERFACE_ORDER',
'admin2,public2,management2,private2,storage2')
INTERFACE_ORDER.extend(CUSTOM_INTERFACE_ORDER.split(','))
BONDING = os.environ.get("BONDING", 'false') == 'true'
BONDING_INTERFACES = {
'admin': ['eth0'],
'public': ['eth1', 'eth2', 'eth3', 'eth4']
}
NETWORK_MANAGERS = {
'flat': 'FlatDHCPManager',
'vlan': 'VlanManager'
}
NEUTRON = 'neutron'
NEUTRON_SEGMENT = {
'gre': 'gre',
'vlan': 'vlan'
}
LOGS_DIR = os.environ.get('LOGS_DIR', os.getcwd())
USE_ALL_DISKS = os.environ.get('USE_ALL_DISKS', 'true') == 'true'
UPLOAD_MANIFESTS = os.environ.get('UPLOAD_MANIFESTS', 'false') == 'true'
SYNC_DEPL_TASKS = os.environ.get('SYNC_DEPL_TASKS', 'false') == 'true'
UPLOAD_MANIFESTS_PATH = os.environ.get(
'UPLOAD_MANIFESTS_PATH', '~/git/fuel/deployment/puppet/')
SITEPP_FOR_UPLOAD = os.environ.get(
'SITEPP_PATH', '/etc/puppet/modules/osnailyfacter/examples/site.pp')
UPLOAD_PATCHSET = os.environ.get('UPLOAD_PATCHSET', 'false') == 'true'
GERRIT_REFSPEC = os.environ.get('GERRIT_REFSPEC')
PATCH_PATH = os.environ.get(
'PATCH_PATH', '/tmp/fuel-ostf')
KVM_USE = os.environ.get('KVM_USE', 'false') == 'true'
VCENTER_USE = os.environ.get('VCENTER_USE', 'false') == 'true'
DEBUG_MODE = os.environ.get('DEBUG_MODE', 'true') == 'true'
# vCenter tests
VCENTER_IP = os.environ.get('VCENTER_IP')
VCENTER_USERNAME = os.environ.get('VCENTER_USERNAME')
VCENTER_PASSWORD = os.environ.get('VCENTER_PASSWORD')
VCENTER_CLUSTERS = os.environ.get('VCENTER_CLUSTERS')
# Cinder with VMDK backend settings
VC_HOST = os.environ.get('VCENTER_IP')
VC_USER = os.environ.get('VCENTER_USERNAME')
VC_PASSWORD = os.environ.get('VCENTER_PASSWORD')
VC_DATACENTER = os.environ.get('VC_DATACENTER')
VC_DATASTORE = os.environ.get('VC_DATASTORE')
VC_IMAGE_DIR = os.environ.get('VC_IMAGE_DIR')
IMAGES_VCENTER = os.environ.get('IMAGES_VCENTER')
# Services tests
SERVTEST_LOCAL_PATH = os.environ.get('SERVTEST_LOCAL_PATH', '/tmp')
SERVTEST_USERNAME = os.environ.get('SERVTEST_USERNAME', 'admin')
SERVTEST_PASSWORD = os.environ.get('SERVTEST_PASSWORD', SERVTEST_USERNAME)
SERVTEST_TENANT = os.environ.get('SERVTEST_TENANT', SERVTEST_USERNAME)
SERVTEST_SAHARA_VANILLA_2_IMAGE = ('sahara-juno-vanilla-'
'2.4.1-ubuntu-14.04.qcow2')
SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME = 'sahara-juno-vanilla-2.4.1-ubuntu-14.04'
SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5 = 'e32bef0d3bc4b2c906f5499e14f9b377'
SERVTEST_SAHARA_VANILLA_2_IMAGE_META = {'_sahara_tag_2.4.1': 'True',
'_sahara_tag_vanilla': 'True',
'_sahara_username': 'ubuntu'}
SERVTEST_MURANO_IMAGE = "ubuntu_14_04-murano-agent_stable_juno.qcow2"
SERVTEST_MURANO_IMAGE_MD5 = '9f562f3f577dc32698c11a99d3f15070'
SERVTEST_MURANO_IMAGE_NAME = 'murano'
SERVTEST_MURANO_IMAGE_META = {
'murano_image_info': '{"type": "linux", "title": "murano"}'}
DEFAULT_IMAGES_CENTOS = os.environ.get(
'DEFAULT_IMAGES_CENTOS',
'/var/lib/libvirt/images/centos6.4-base.qcow2')
DEFAULT_IMAGES_UBUNTU = os.environ.get(
'DEFAULT_IMAGES_UBUNTU',
'/var/lib/libvirt/images/ubuntu-12.04.1-server-amd64-p2.qcow2')
OS_IMAGE = os.environ.get('OS_IMAGE', DEFAULT_IMAGES_CENTOS)
OSTF_TEST_NAME = os.environ.get('OSTF_TEST_NAME',
'Check network connectivity'
' from instance via floating IP')
OSTF_TEST_RETRIES_COUNT = int(os.environ.get('OSTF_TEST_RETRIES_COUNT', 50))
# The variable below is only for test:
# fuelweb_test.tests.tests_strength.test_ostf_repeatable_tests
# :OstfRepeatableTests.run_ostf_n_times_against_custom_deployment
DEPLOYMENT_NAME = os.environ.get('DEPLOYMENT_NAME')
# Needed for ISO with docker
TIMEOUT = int(os.environ.get('TIMEOUT', 60))
ATTEMPTS = int(os.environ.get('ATTEMPTS', 5))
# Create snapshots as the last step in a test case
MAKE_SNAPSHOT = os.environ.get('MAKE_SNAPSHOT', 'false') == 'true'
NEUTRON_ENABLE = os.environ.get('NEUTRON_ENABLE', 'false') == 'true'
NEUTRON_SEGMENT_TYPE = os.environ.get('NEUTRON_SEGMENT_TYPE',
NEUTRON_SEGMENT["vlan"])
FUEL_SETTINGS_YAML = os.environ.get('FUEL_SETTINGS_YAML',
'/etc/fuel/astute.yaml')
# TarBall data for updates and upgrades
TARBALL_PATH = os.environ.get('TARBALL_PATH')
UPGRADE_FUEL_FROM = os.environ.get('UPGRADE_FUEL_FROM', '6.0.1')
UPGRADE_FUEL_TO = os.environ.get('UPGRADE_FUEL_TO', '6.1')
SNAPSHOT = os.environ.get('SNAPSHOT', '')
# For 5.1.1 we have 2 releases in tarball and should specify what we need
RELEASE_VERSION = os.environ.get('RELEASE_VERSION', "2014.2.2-6.1")
# URL to custom mirror with new OSCI packages which should be tested,
# for example:
# CentOS: http://osci-obs.vm.mirantis.net:82/centos-fuel-master-20921/centos/
# Ubuntu: http://osci-obs.vm.mirantis.net:82/ubuntu-fuel-master-20921/ubuntu/
CUSTOM_PKGS_MIRROR = os.environ.get('CUSTOM_PKGS_MIRROR', '')
# Location of local mirrors on master node.
LOCAL_MIRROR_UBUNTU = os.environ.get('LOCAL_MIRROR_UBUNTU',
'/var/www/nailgun/ubuntu/x86_64')
LOCAL_MIRROR_CENTOS = os.environ.get('LOCAL_MIRROR_CENTOS',
'/var/www/nailgun/centos/x86_64')
# Release name of local Ubuntu mirror on Fuel master node.
UBUNTU_RELEASE = os.environ.get('UBUNTU_RELEASE', 'precise')
UPDATE_TIMEOUT = os.environ.get('UPDATE_TIMEOUT', 3600)
IMAGE_PROVISIONING = os.environ.get('IMAGE_PROVISIONING', 'false') == 'true'
KEYSTONE_CREDS = {'username': os.environ.get('KEYSTONE_USERNAME', 'admin'),
'password': os.environ.get('KEYSTONE_PASSWORD', 'admin'),
'tenant_name': os.environ.get('KEYSTONE_TENANT', 'admin')}
SSH_CREDENTIALS = {
'login': os.environ.get('ENV_FUEL_LOGIN', 'root'),
'password': os.environ.get('ENV_FUEL_PASSWORD', 'r00tme')}
# Plugin path for plugins tests
GLUSTER_PLUGIN_PATH = os.environ.get('GLUSTER_PLUGIN_PATH')
GLUSTER_CLUSTER_ENDPOINT = os.environ.get('GLUSTER_CLUSTER_ENDPOINT')
EXAMPLE_PLUGIN_PATH = os.environ.get('EXAMPLE_PLUGIN_PATH')
LBAAS_PLUGIN_PATH = os.environ.get('LBAAS_PLUGIN_PATH')
FUEL_STATS_CHECK = os.environ.get('FUEL_STATS_CHECK', 'false') == 'true'
FUEL_STATS_ENABLED = os.environ.get('FUEL_STATS_ENABLED', 'true') == 'true'
FUEL_STATS_SSL = os.environ.get('FUEL_STATS_SSL', 'true') == 'true'
FUEL_STATS_HOST = os.environ.get('FUEL_STATS_HOST',
'172.18.2.169')
FUEL_STATS_PORT = os.environ.get('FUEL_STATS_PORT', '443')
CUSTOM_ENV = os.environ.get('CUSTOM_ENV', 'false') == 'true'
BUILD_IMAGES = os.environ.get('BUILD_IMAGES', 'false') == 'true'
STORE_ASTUTE_YAML = os.environ.get('STORE_ASTUTE_YAML', 'false') == 'true'
|
|
#! /usr/bin/env python
# Immunity libdisassemble
#
# Most of the functions are ported from the great libdisasm, since we
# are using their opcode map.
# TODO:
# - Fix the getSize(), doesn't work well with all the opcodes
# - Enhance the metadata info with more information on opcode.
# i.e. we need a way to know if an address is an immediate, a relative offset, etc
# - Fix the jmp (SIB) opcode in AT&T syntax: it has different output than the others.
# - support all the PREFIX*
# NOTE: This is less than a week's work, so it might be full of bugs (we love bugs!)
#
# Any question, comments, hate mail: [email protected]
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This code largely copyright Immunity, Inc (2004), parts
# copyright mammon and used under the LGPL by permission
import disassemblers.libdisassemble.opcode86 as opcode86
import struct
from sys import *
table86=opcode86.tables86
OP_PERM_MASK = 0x0000007
OP_TYPE_MASK = 0x0000F00
OP_MOD_MASK = 0x00FF000
OP_SEG_MASK = 0x00F0000
OP_SIZE_MASK = 0x0F000000
class Mode:
def __init__(self, type, mode=32):
self.type = type #type & 0x700
#self.flag = type & 0x7
self.flag = type & OP_PERM_MASK
self.length = 0
self.mode = mode
# format "AT&T" or "INTEL"
def printOpcode(self, format, eip=0):
return "Not available"
def getType(self):
return self.type
def getSize(self):
return self.length
def getFlag(self):
return self.flag
def getSFlag(self):
return ("R", "W", "X")[self.flag/2]
def getOpSize(self):
return (self.type & OP_SIZE_MASK)
def getAddrMeth(self):
return (self.type & opcode86.ADDRMETH_MASK)
class Register(Mode):
def __init__(self, regndx, type=opcode86.OP_REG):
Mode.__init__(self, type)
#print regndx
(self.name, self.detail, self.length)=opcode86.regs[regndx]
def printOpcode(self, format="INTEL", eip=0):
if format == "INTEL":
return self.name
else:
return "%%%s" % self.name
def getName(self):
return self.name
class Address(Mode):
def __init__(self, data, length, type=opcode86.ADDEXP_DISP_OFFSET, signed=1, relative = None):
Mode.__init__(self, type)
self.signed=signed
self.length = length
#print "'%s' %d %x, %s"%(data, length, type, relative)
fmt = "<"
if self.signed:
fmt += ("b", "h", "l")[length//2]
else:
fmt += ("B", "H", "L")[length//2]
if (self.getAddrMeth() == opcode86.ADDRMETH_A):
fmt += "H"
length += 2
self.value, self.segment = struct.unpack(fmt, data[:length])
else:
self.value, = struct.unpack(fmt, data[:length])
self.segment = None
self.relative = relative
def printOpcode(self, format="INTEL", eip=0, exp=0):
value = self.value
segment = self.segment
if (self.relative):
value += eip
if format == "INTEL":
tmp=""
if (segment):
tmp += "0x%04x:"%(segment)
if self.signed:
if value < 0:
return "%s-0x%x" % (tmp, value * -1)
return "%s0x%x" % (tmp,self.value)
else:
#if self.length == 4 or not self.signed:
return "%s0x%x" % (tmp,self.value)
#else:
else:
pre=""
#if self.getAddrMeth() == opcode86.ADDRMETH_E and not exp:
if (self.getAddrMeth() == opcode86.ADDRMETH_I or self.getAddrMeth() == opcode86.ADDRMETH_A or self.type & opcode86.OP_IMM) and not exp:
pre+="$"
if segment:
pre = "$0x%04x:%s"%(segment,pre)
if (value < 0):
if (self.signed):
return "%s0x%0x" % (pre, ((1<<self.length*8) + value))
else:
return "%s-0x%0x" % (pre, (-value))
else:
return "%s0x%0x" % (pre, (value))
class Expression(Mode):
def __init__(self, disp, base, type):
Mode.__init__(self, type)
self.disp = disp
self.base = base
self.psize = 4
def setPsize(self, size):
self.psize= size
def getPsize(self):
return self.psize
def getType(self):
return EXPRESSION
def printOpcode(self, format="INTEL", eip=0):
tmp=""
if format == "INTEL":
if self.base:
tmp += self.base.printOpcode(format, eip)
if self.disp:
if self.disp.value:
if self.disp.value > 0 and tmp:
tmp+="+"
tmp += self.disp.printOpcode(format, eip, 1)
pre=""
optype=self.getOpSize()
addr_meth=self.getAddrMeth()
if addr_meth == opcode86.ADDRMETH_E:
if optype == opcode86.OPTYPE_b:
pre="BYTE PTR"
elif optype== opcode86.OPTYPE_w:
pre="WORD PTR"
else :
pre="DWORD PTR"
tmp="%s [%s]" % (pre, tmp)
else:
if self.base:
tmp+="(%s)" % self.base.printOpcode(format, eip)
if self.disp:
tmp= "%s%s" % (self.disp.printOpcode(format, eip, 1), tmp)
#tmp="Not available"
return tmp
class SIB(Mode):
def __init__(self, scale, base, index):
self.scale = scale
self.base = base
self.index = index
def printOpcode(self, format="INTEL", eip=0):
tmp=""
if format == "INTEL":
if self.base:
tmp+="%s" % self.base.printOpcode(format, eip)
if self.scale > 1:
tmp+= "*%d" % self.scale
if self.index:
if tmp:
tmp+="+"
tmp+="%s" % self.index.printOpcode(format, eip)
else:
if self.base:
tmp+="%s" % self.base.printOpcode(format, eip)
if self.index:
#if tmp:
#tmp+=","
tmp += ", %s" % self.index.printOpcode(format, eip)
if self.scale:
if (self.scale > 1 or self.index):
tmp+=", %d" % self.scale
return tmp
return tmp
class Prefix:
def __init__(self, ndx, ptr):
self.ptr = ptr
self.type = opcode86.prefix_table[ndx]
def getType(self):
return self.type
def getName(self):
if self.ptr[6]:
return self.ptr[6]
else:
return ""
class Opcode:
def __init__(self, data, mode=32):
self.length = 0
self.mode = mode
if mode == 64:
self.addr_size = 4
else:
            self.addr_size = mode // 8  # 32-bit mode = 4 bytes. 16-bit mode = 2 bytes
self.data = data
self.off = 0
self.source = ""
self.dest = ""
self.aux = ""
self.prefix = []
self.parse(table86[0], self.off)
def getOpcodetype(self):
return self.opcodetype
def parse(self, table, off):
"""
Opcode.parse() is the core logic of libdisassemble. It recurses through the supplied bytes digesting prefixes and opcodes, and then handles operands.
"""
try: ## Crash Gracefully with a "invalid" opcode
self.addr_size = 4
ndx = self.data[off]
### This next line slices and dices the opcode to make it fit correctly into the current lookup table.
#
            # (table, shift, mask, min, max), e.g.:
# (tbl_0F, 0, 0xff, 0, 0xff),
# (tbl_80, 3, 0x07, 0, 0xff),
#
# simple subtraction
# shift bits right (eg. >> 4 makes each line in the table valid for 16 numbers... ie 0xc0-0xcf are all one entry in the table)
# mask part of the byte (eg. & 0x7 only makes use of the 00000111 bits...)
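            # Illustrative worked example with the (tbl_80, 3, 0x07, 0, 0xff)
            # row above: shift=3 and mask=0x07 extract bits 3-5 of the byte
            # (the ModRM "reg" field here), so 0xC8 -> ((0xC8 - 0) >> 3) & 0x07 == 1
            # and entry 1 of that sub-table is selected.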
if (ndx > table[4]):
table = table86[table[5]] # if the opcode byte falls outside the bounds of accepted values, use the table pointed to as table[5]
ndx = ( (ndx - table[3]) >> table[1]) & table[2]
ptr = table[0][ndx] # index from table
if ptr[1] == opcode86.INSTR_PREFIX or (ptr[1] & opcode86.INSTR_PREFIX64 and self.mode == 64):
# You can have more than one prefix (!?)
                if ptr[0] != 0 and len(self.data) > off + 1 and self.data[off+1] == 0x0F:
self.parse(table86[ptr[0]], off+2) # eg. 660Fxx, F20Fxx, F30Fxx, etc...
else:
self.prefix.append( Prefix(ndx, ptr) )
self.parse(table, off+1) # parse next instruction
return
if ptr[0] != 0:
# > 1 byte length opcode
self.parse(table86[ptr[0]], off+1)
return
### End Recursion, we hit a leaf node.
self.opcode = ptr[6]
self.opcodetype = ptr[1]
self.cpu = ptr[5]
self.off = off + 1 # This instruction
if table[2] != 0xff: # Is this byte split between opcode and operand? If so, let's not skip over this byte quite yet...
self.off-=1
#print >>stderr,(" opcode = %s\n opcodetype = %x\n cpu = %x\n off = %d"%(ptr[6], ptr[1], ptr[5], off+1))
n_bytes=0
# src dst aux
values=['', '', '' ]
r = [False]*3
w = [False]*3
#print self.off
for a in range(2, 5):
ret = (0, None)
tmp =ptr[a]
addr_meth = tmp & opcode86.ADDRMETH_MASK;
if addr_meth == opcode86.OP_REG:
                    # what am I supposed to do here?
pass
operand_size = self.get_operand_size(tmp)
#print operand_size
if operand_size == 1:
genreg = opcode86.REG_BYTE_OFFSET
elif operand_size == 2:
genreg = opcode86.REG_WORD_OFFSET
else:
genreg= opcode86.REG_DWORD_OFFSET
# Begin hashing on the ADDRMETH for this operand. This should determine the number of bytes to advance in the data.
if addr_meth == opcode86.ADDRMETH_E:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, genreg, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_M:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, genreg, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_N:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, opcode86.REG_MMX_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_Q:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, opcode86.REG_MMX_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_R:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, genreg, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_W:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, opcode86.REG_SIMD_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_C:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, opcode86.REG_CTRL_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_D:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, opcode86.REG_DEBUG_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_G:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, genreg, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_P:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, opcode86.REG_MMX_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_S:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, opcode86.REG_SEG_OFFSET, self.addr_size, tmp)
#elif addr_meth == opcode86.ADDRMETH_T: #TEST REGISTERS?:
#ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, opcode86.REG_TEST_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_U:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_EA, opcode86.REG_SIMD_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_V:
ret=self.get_modrm(self.data[self.off:], opcode86.MODRM_reg, opcode86.REG_SIMD_OFFSET, self.addr_size, tmp)
elif addr_meth == opcode86.ADDRMETH_A:
ret= (self.addr_size, Address(self.data[self.off:], self.addr_size, tmp, signed=0))
elif addr_meth == opcode86.ADDRMETH_F:
# eflags, so what?
pass
elif addr_meth == opcode86.ADDRMETH_I:
if tmp & opcode86.OP_SIGNED:
ret = (operand_size, Address( self.data[self.off+n_bytes:], operand_size, tmp))
#ret = (self.addr_size, Address( self.data[self.off+bytes:], operand_size, tmp))
else:
ret = (operand_size, Address( self.data[self.off+n_bytes:], operand_size,tmp, signed=0))
#ret = (self.addr_size, Address( self.data[self.off+bytes:], operand_size,tmp, signed=0))
elif addr_meth == opcode86.ADDRMETH_J:
ret = (operand_size, Address( self.data[self.off+n_bytes:], operand_size, tmp, signed=1, relative=True))
#ret = (self.addr_size, Address( self.data[self.off+bytes:], operand_size, tmp, signed=1, relative=True))
elif addr_meth == opcode86.ADDRMETH_O:
ret = (self.addr_size, Address( self.data[self.off:], self.addr_size, tmp, signed=0))
elif addr_meth == opcode86.ADDRMETH_X:
ret = (0, Register(6+opcode86.REG_DWORD_OFFSET, tmp))
elif addr_meth == opcode86.ADDRMETH_Y:
ret = (0, Register(7+opcode86.REG_DWORD_OFFSET, tmp))
else:
if tmp & opcode86.OP_REG:
regoff = 0
if self.mode == 64 and self.opcodetype in [opcode86.INS_PUSH, opcode86.INS_POP]:
regoff = opcode86.REG_QWORD_OFFSET - opcode86.REG_DWORD_OFFSET
if self.rex('w'):
regoff -= 16
if self.rex('b'):
regoff += 8
ret = (0, Register(ptr[5+a]+regoff, tmp))
elif tmp & opcode86.OP_IMM:
ret = (0, Address(bytes([ptr[5+a]]), 1, signed=0))
else:
ret= (0, None)
if ret[1]:
if isinstance(ret[1], Expression):
ret[1].setPsize(operand_size)
values[a-2]=ret[1]
r[a-2] = (tmp & opcode86.OP_R) != 0
w[a-2] = (tmp & opcode86.OP_W) != 0
n_bytes += ret[0]
self.source = values[0]
self.dest = values[1]
self.aux = values[2]
self.r = r
self.w = w
self.off += n_bytes
#self.data = self.data[:self.off]
except IndexError:
output = ""
for i in range(len(self.data)):
output += "%02x"%self.data[i]
print (("Error Parsing Opcode - Data: %s\t Offset: 0x%x"%(output,self.off)), file=stderr)
x,y,z = exc_info()
excepthook(x,y,z)
def getSize(self):
return self.off
def get_operand_size(self, opflag):
"""
get_operand_size() gets the operand size, not the address-size or the size of the opcode itself.
But it's also bastardized, because it manipulates self.addr_size at the top
"""
        size = self.mode // 8  # initial setting (4 for 32-bit mode)
if self.mode == 64:
size = 4
flag = opflag & opcode86.OPTYPE_MASK
#print "flag=%x mode=%d"%(flag,self.mode)
if (flag in opcode86.OPERSIZE.keys()): # lookup the value in the table
size = opcode86.OPERSIZE[flag][size >> 2]
for a in self.prefix:
if a.getType() & opcode86.PREFIX_OP_SIZE and size > 2:
size = 2
if a.getType() & opcode86.PREFIX_ADDR_SIZE:
                # this simply swaps between 16- and 32-bit (the default is determined at a "system-wide" level); this will require changes for 64-bit mode
if (self.addr_size == 2):
self.addr_size = 4
else:
self.addr_size = 2
return size
"""
### THIS IS THE OLD LIBDISASSEMBLE CODE...
#print flag
if flag == opcode86.OPTYPE_c:
size = (1,2)[size==4]
elif (flag == opcode86.OPTYPE_a) or (flag == opcode86.OPTYPE_v) or (flag == opcode86.OPTYPE_z):
size = (2,4)[size==4]
elif flag == opcode86.OPTYPE_p:
size = (4,6)[size==4]
elif flag == opcode86.OPTYPE_b:
size = 1
elif flag == opcode86.OPTYPE_w:
size = 2
elif flag == opcode86.OPTYPE_d:
size = 4
elif flag & opcode86.OPTYPE_s:
size = 6
elif flag & opcode86.OPTYPE_q:
size = 8
# - a lot more to add
"""
def get_reg(self, regtable, num):
return regtable[num]
def get_sib(self, data, mod):
count = 1
sib = data[0]
s=None
#print "SIB: %s" % hex(ord(data[0]))
scale = (sib >> 6) & 0x3 # XX
index = (sib & 56) >>3 # XXX
base = sib & 0x7 # XXX
base2 = None
index2= None
#print base, index, scale
        # Special case
if base == 5 and not mod:
base2 = Address(data[1:], 4)
count += 4
else:
if self.rex('b'):
base += 8
base2 = Register(base + 16)
index2=None
        # Yeah, I know, this is really ugly
if index != 4: # ESP
if self.rex('x'):
index += 8
index2=Register( index + 16)
else:
scale = 0
s= SIB( 1<<scale, base2, index2)
return (count, s)
def get_modrm(self, data, flags, reg_type, size, type_flag):
"""
returns a tuple: (bytecount, Object)
* bytecount is the number of bytes to advance through data
"""
modrm= data[0]
count = 1
mod = (modrm >> 6) & 0x3 # XX
reg = (modrm >> 3) & 0x7 # XXX
rm = modrm & 0x7 # XXX
result = None
disp = None
base = None
rmoff = 0
regoff = 0
if self.rex('w'):
rmoff -= 16
regoff -= 16
if self.rex('b'):
rmoff += 8
if self.rex('r'):
regoff += 8
if flags == opcode86.MODRM_EA:
if mod == 3: # 11
result=Register(rm+reg_type+rmoff, type_flag)
elif mod == 0: # 0
if rm == 5:
disp= Address(data[count:], self.addr_size, type_flag)
count+= self.addr_size
elif rm == 4:
(tmpcount, base) =self.get_sib(data[count:], mod)
count+=tmpcount
else:
#print ("mod:%d\t reg:%d\t rm:%d"%(mod,reg,rm))
base=Register(rm+reg_type+rmoff, type_flag)
else:
if rm ==4:
disp_base = 2
(tmpcount, base) =self.get_sib(data[count:], mod)
count+=tmpcount
else:
disp_base = 1
base=Register(rm+reg_type+rmoff, type_flag)
#print ">BASE: %s" % base.printOpcode()
if mod == 1:
disp= Address(data[disp_base:], 1, type_flag)
count+=1
else:
disp= Address(data[disp_base:], self.addr_size, type_flag)
count+= self.addr_size
if disp or base:
result=Expression(disp, base, type_flag)
else:
result=Register(reg+reg_type+regoff, type_flag)
count=0
return (count, result)
# FIX:
#
def getOpcode(self, FORMAT, eip = 0):
opcode=[]
if not self.opcode:
return [0]
if FORMAT == "INTEL":
opcode.append("%s" % self.opcode)
#tmp="%-06s %s" % (self.opcode, " " * space)
if self.source:
opcode.append(self.source.printOpcode(FORMAT, eip))
#tmp+=" %s" % self.source.printOpcode(FORMAT, eip)
if self.dest:
opcode.append(self.dest.printOpcode(FORMAT, eip))
#tmp+=", %s" % self.dest.printOpcode(FORMAT, eip)
if self.aux:
opcode.append(self.aux.printOpcode(FORMAT, eip))
else:
mnemonic = self.opcode
post=[]
if self.source and self.dest:
addr_meth = self.source.getAddrMeth()
optype = self.source.getOpSize()
mnemonic = self.opcode
if addr_meth == opcode86.ADDRMETH_E and\
not (isinstance(self.source, Register) or\
isinstance(self.dest, Register)):
if optype == opcode86.OPTYPE_b:
mnemonic+="b"
elif optype== opcode86.OPTYPE_w:
mnemonic+=""
else :
mnemonic+="l"
##first="%-06s %s" % (mnemonic, " " * space)
post= [self.dest.printOpcode(FORMAT, eip), self.source.printOpcode(FORMAT, eip)]
if self.aux:
post.append(self.aux.printOpcode(FORMAT, eip))
#post = "%s, %s" % (self.dest.printOpcode(FORMAT,eip), self.source.printOpcode(FORMAT, eip))
elif self.source:
#second="%-06s %s" % (mnemonic, " " * space)
opt = self.getOpcodetype()
tmp=""
if (opt== opcode86.INS_CALL or\
opt== opcode86.INS_BRANCH)\
and self.source.getAddrMeth() == opcode86.ADDRMETH_E:
tmp = "*"
post=[tmp + self.source.printOpcode(FORMAT, eip)]
#post += "%s" % self.source.printOpcode(FORMAT, eip)
opcode = [mnemonic] + post
return (opcode, self.r, self.w)
def printOpcode(self, FORMAT, eip = 0, space=6):
        # getOpcode() returns (opcode_list, r, w) for decoded instructions and
        # the bare list [0] when there is no opcode; keep only the opcode list.
        result = self.getOpcode(FORMAT, eip + self.getSize())
        opcode = result[0] if isinstance(result, tuple) else result
        prefix = self.getPrefix()
        if opcode[0] == 0:
return "invalid"
if len(opcode) ==2:
return "%-08s%s%s" % (prefix+opcode[0], " " * space, opcode[1])
#return "%-08s%s%s" % (prefix+opcode[0], " " * 6, opcode[1])
elif len(opcode) ==3:
return "%-08s%s%s, %s" % (prefix+opcode[0], " " * space, opcode[1], opcode[2])
#return "%-08s%s%s, %s" % (prefix+ opcode[0], " " * 6, opcode[1], opcode[2])
elif len(opcode) ==4:
return "%-08s%s%s, %s, %s" % (prefix+opcode[0], " " * space, opcode[3], opcode[1], opcode[2])
else:
return "%-08s" % (prefix+opcode[0])
def rex(self, f):
if self.mode != 64:
return False
b, w, x, r = False, False, False, False
for a in self.prefix:
type = a.getType()
if type & opcode86.PREFIX_REX:
if type & opcode86.PREFIX_REXB:
b = True
if type & opcode86.PREFIX_REXW:
w = True
if type & opcode86.PREFIX_REXX:
x = True
if type & opcode86.PREFIX_REXR:
r = True
if f == 'w':
return w
if f == 'x':
return x
if f == 'b':
return b
if f == 'r':
return r
return False
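    # Worked example (illustrative): in 64-bit mode a REX prefix byte of 0x48
    # (0100WRXB with only W set) makes rex('w') return True, which the decoder
    # uses above to select the wider register banks.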
def getPrefix(self):
prefix=""
for a in self.prefix:
type = a.getType()
if type in [opcode86.PREFIX_LOCK, opcode86.PREFIX_REPNZ, opcode86.PREFIX_REP]:
prefix+= a.getName() + " "
if self.mode == 64:
if (type & opcode86.PREFIX_REX):
rex = ''
if type & opcode86.PREFIX_REXB:
rex += 'b'
if type & opcode86.PREFIX_REXW:
rex += 'w'
if type & opcode86.PREFIX_REXX:
rex += 'x'
if type & opcode86.PREFIX_REXR:
rex += 'r'
if rex:
prefix += 'REX.'+rex+' '
else:
prefix += 'REX '
return prefix
if __name__=="__main__":
    # Minimal command-line driver: disassemble a slice of a file.
import sys
if len(sys.argv) != 4:
print ("usage: {} <file> <offset> <size>".format(sys.argv[0]))
print ("\t file:\t file to disassemble")
print ("\t offset:\t offset to beggining of code(hexa)")
print ("\t size:\t amount of bytes to dissasemble (hexa)\n")
sys.exit(0)
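    # Example invocation (hypothetical file and offsets):
    #   python <this script> /path/to/binary 0x400 0x40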
    f = open(sys.argv[1], "rb") # binary mode: the decoder expects raw bytes
offset= int(sys.argv[2], 16)
f.seek( offset )
buf=f.read(int(sys.argv[3], 16) )
off=0
FORMAT="AT&T"
print("Disassembling file %s at offset: 0x%x" % (sys.argv[1], offset))
while 1:
try:
p=Opcode(buf[off:])
print(" %08X: %s" % (off+offset, p.printOpcode(FORMAT)))
off+=p.getSize()
except ValueError:
break
|
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import tensorflow as tf
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import errors
from tensorflow_federated.python.learning.templates import learning_process
LearningProcessOutput = learning_process.LearningProcessOutput
@computations.federated_computation()
def test_init_fn():
return intrinsics.federated_value(0, placements.SERVER)
test_state_type = test_init_fn.type_signature.result
@computations.tf_computation
def sum_sequence(s):
spec = s.element_spec
return s.reduce(
tf.zeros(spec.shape, spec.dtype),
lambda s, t: tf.nest.map_structure(tf.add, s, t))
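# Illustrative: for a client dataset of int32 values such as [1, 2, 3], sum_sequence
# folds them with tf.add starting from a zero of the element spec, yielding 6.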
ClientIntSequenceType = computation_types.at_clients(
computation_types.SequenceType(tf.int32))
def build_next_fn(server_init_fn):
@computations.federated_computation(server_init_fn.type_signature.result,
ClientIntSequenceType)
def next_fn(state, client_values):
metrics = intrinsics.federated_map(sum_sequence, client_values)
metrics = intrinsics.federated_sum(metrics)
return LearningProcessOutput(state, metrics)
return next_fn
def build_get_model_weights(server_init_fn):
@computations.tf_computation(server_init_fn.type_signature.result.member)
def get_model_weights(state):
return state
return get_model_weights
test_next_fn = build_next_fn(test_init_fn)
test_get_model_weights = build_get_model_weights(test_init_fn)
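# Rough usage sketch for a process built from the pieces above (illustrative only;
# `client_data` is a placeholder and the tests below only exercise validation):
#   process = learning_process.LearningProcess(
#       test_init_fn, test_next_fn, test_get_model_weights)
#   state = process.initialize()
#   output = process.next(state, client_data)  # LearningProcessOutput(state, metrics)
#   weights = process.get_model_weights(output.state)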
class LearningProcessTest(test_case.TestCase):
def test_construction_does_not_raise(self):
try:
learning_process.LearningProcess(test_init_fn, test_next_fn,
test_get_model_weights)
except: # pylint: disable=bare-except
self.fail('Could not construct a valid LearningProcess.')
def test_learning_process_can_be_reconstructed(self):
process = learning_process.LearningProcess(test_init_fn, test_next_fn,
test_get_model_weights)
try:
learning_process.LearningProcess(process.initialize, process.next,
process.get_model_weights)
except: # pylint: disable=bare-except
self.fail('Could not reconstruct the LearningProcess.')
def test_construction_with_empty_state_does_not_raise(self):
@computations.federated_computation()
def empty_initialize_fn():
return intrinsics.federated_value((), placements.SERVER)
next_fn = build_next_fn(empty_initialize_fn)
get_model_weights = build_get_model_weights(empty_initialize_fn)
try:
learning_process.LearningProcess(empty_initialize_fn, next_fn,
get_model_weights)
except: # pylint: disable=bare-except
self.fail('Could not construct a LearningProcess with empty state.')
def test_construction_with_unknown_dimension_does_not_raise(self):
create_empty_string = computations.tf_computation()(
lambda: tf.constant([], dtype=tf.string))
initialize_fn = computations.federated_computation()(
lambda: intrinsics.federated_value(create_empty_string(), placements.
SERVER))
next_fn = build_next_fn(initialize_fn)
get_model_weights = build_get_model_weights(initialize_fn)
try:
learning_process.LearningProcess(initialize_fn, next_fn,
get_model_weights)
except: # pylint: disable=bare-except
self.fail('Could not construct a LearningProcess with state type having '
'statically unknown shape.')
def test_init_not_tff_computation_raises(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
init_fn = lambda: 0
learning_process.LearningProcess(init_fn, test_next_fn,
test_get_model_weights)
def test_next_not_tff_computation_raises(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
learning_process.LearningProcess(
initialize_fn=test_init_fn,
next_fn=lambda state, client_data: LearningProcessOutput(state, ()),
get_model_weights=test_get_model_weights)
def test_init_param_not_empty_raises(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def one_arg_initialize_fn(x):
return x
with self.assertRaises(errors.TemplateInitFnParamNotEmptyError):
learning_process.LearningProcess(one_arg_initialize_fn, test_next_fn,
test_get_model_weights)
def test_init_state_not_assignable(self):
float_initialize_fn = computations.federated_computation()(lambda: 0.0)
with self.assertRaises(errors.TemplateStateNotAssignableError):
learning_process.LearningProcess(float_initialize_fn, test_next_fn,
test_get_model_weights)
def test_next_state_not_assignable(self):
float_initialize_fn = computations.federated_computation()(lambda: 0.0)
float_next_fn = build_next_fn(float_initialize_fn)
with self.assertRaises(errors.TemplateStateNotAssignableError):
learning_process.LearningProcess(test_init_fn, float_next_fn,
test_get_model_weights)
def test_init_fn_with_client_placed_state_raises(self):
init_fn = computations.federated_computation(
lambda: intrinsics.federated_value(0, placements.CLIENTS))
@computations.federated_computation(init_fn.type_signature.result,
ClientIntSequenceType)
def next_fn(state, client_values):
return LearningProcessOutput(state, client_values)
with self.assertRaises(learning_process.LearningProcessPlacementError):
learning_process.LearningProcess(init_fn, next_fn, test_get_model_weights)
def test_next_return_tuple_raises(self):
@computations.federated_computation(test_state_type, ClientIntSequenceType)
def tuple_next_fn(state, client_values):
metrics = intrinsics.federated_map(sum_sequence, client_values)
metrics = intrinsics.federated_sum(metrics)
return (state, metrics)
with self.assertRaises(learning_process.LearningProcessOutputError):
learning_process.LearningProcess(test_init_fn, tuple_next_fn,
test_get_model_weights)
def test_next_return_namedtuple_raises(self):
learning_process_output = collections.namedtuple('LearningProcessOutput',
['state', 'metrics'])
@computations.federated_computation(test_state_type, ClientIntSequenceType)
def namedtuple_next_fn(state, client_values):
metrics = intrinsics.federated_map(sum_sequence, client_values)
metrics = intrinsics.federated_sum(metrics)
return learning_process_output(state, metrics)
with self.assertRaises(learning_process.LearningProcessOutputError):
learning_process.LearningProcess(test_init_fn, namedtuple_next_fn,
test_get_model_weights)
def test_next_return_odict_raises(self):
@computations.federated_computation(test_state_type, ClientIntSequenceType)
def odict_next_fn(state, client_values):
metrics = intrinsics.federated_map(sum_sequence, client_values)
metrics = intrinsics.federated_sum(metrics)
return collections.OrderedDict(state=state, metrics=metrics)
with self.assertRaises(learning_process.LearningProcessOutputError):
learning_process.LearningProcess(test_init_fn, odict_next_fn,
test_get_model_weights)
def test_next_fn_with_one_parameter_raises(self):
@computations.federated_computation(test_state_type)
def next_fn(state):
return LearningProcessOutput(state, 0)
with self.assertRaises(errors.TemplateNextFnNumArgsError):
learning_process.LearningProcess(test_init_fn, next_fn,
test_get_model_weights)
def test_next_fn_with_three_parameters_raises(self):
@computations.federated_computation(test_state_type, ClientIntSequenceType,
test_state_type)
def next_fn(state, client_values, second_state): # pylint: disable=unused-argument
metrics = intrinsics.federated_map(sum_sequence, client_values)
metrics = intrinsics.federated_sum(metrics)
return LearningProcessOutput(state, metrics)
with self.assertRaises(errors.TemplateNextFnNumArgsError):
learning_process.LearningProcess(test_init_fn, next_fn,
test_get_model_weights)
def test_next_fn_with_non_client_placed_second_arg_raises(self):
int_sequence_at_server = computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER)
@computations.federated_computation(test_state_type, int_sequence_at_server)
def next_fn(state, server_values):
metrics = intrinsics.federated_map(sum_sequence, server_values)
return LearningProcessOutput(state, metrics)
with self.assertRaises(learning_process.LearningProcessPlacementError):
learning_process.LearningProcess(test_init_fn, next_fn,
test_get_model_weights)
def test_next_fn_with_non_sequence_second_arg_raises(self):
ints_at_clients = computation_types.FederatedType(tf.int32,
placements.CLIENTS)
@computations.federated_computation(test_state_type, ints_at_clients)
def next_fn(state, client_values):
metrics = intrinsics.federated_sum(client_values)
return LearningProcessOutput(state, metrics)
with self.assertRaises(learning_process.LearningProcessSequenceTypeError):
learning_process.LearningProcess(test_init_fn, next_fn,
test_get_model_weights)
def test_next_fn_with_client_placed_metrics_result_raises(self):
@computations.federated_computation(test_state_type, ClientIntSequenceType)
def next_fn(state, metrics):
return LearningProcessOutput(state, metrics)
with self.assertRaises(learning_process.LearningProcessPlacementError):
learning_process.LearningProcess(test_init_fn, next_fn,
test_get_model_weights)
def test_non_tff_computation_get_model_weights_raises(self):
get_model_weights = lambda x: x
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
learning_process.LearningProcess(test_init_fn, test_next_fn,
get_model_weights)
def test_federated_get_model_weights_raises(self):
get_model_weights = computations.federated_computation(test_state_type)(
lambda x: x)
with self.assertRaises(learning_process.GetModelWeightsTypeSignatureError):
learning_process.LearningProcess(test_init_fn, test_next_fn,
get_model_weights)
def test_get_model_weights_param_not_assignable(self):
get_model_weights = computations.tf_computation(tf.float32)(lambda x: x)
with self.assertRaises(learning_process.GetModelWeightsTypeSignatureError):
learning_process.LearningProcess(test_init_fn, test_next_fn,
get_model_weights)
if __name__ == '__main__':
test_case.main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Post.is_sticky'
db.add_column('blogs_post', 'is_sticky',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Category.cat_image_email'
db.add_column('blogs_category', 'cat_image_email',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Category.cat_image_fb'
db.add_column('blogs_category', 'cat_image_fb',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Category.cat_image_tw'
db.add_column('blogs_category', 'cat_image_tw',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Category.cat_image_pint'
db.add_column('blogs_category', 'cat_image_pint',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Category.cat_image_caret'
db.add_column('blogs_category', 'cat_image_caret',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.is_sticky'
db.delete_column('blogs_post', 'is_sticky')
# Deleting field 'Category.cat_image_email'
db.delete_column('blogs_category', 'cat_image_email')
# Deleting field 'Category.cat_image_fb'
db.delete_column('blogs_category', 'cat_image_fb')
# Deleting field 'Category.cat_image_tw'
db.delete_column('blogs_category', 'cat_image_tw')
# Deleting field 'Category.cat_image_pint'
db.delete_column('blogs_category', 'cat_image_pint')
# Deleting field 'Category.cat_image_caret'
db.delete_column('blogs_category', 'cat_image_caret')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'main_color': ('django.db.models.fields.CharField', [], {'default': "'#ff7f00'", 'max_length': '10'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menu': {
'Meta': {'object_name': 'Menu'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'blogs.rss': {
'Meta': {'object_name': 'Rss'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'blogs.template': {
'Meta': {'object_name': 'Template'},
'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
|
|
import datetime
import unittest
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import CharField, Q, TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
Case, Col, Exists, ExpressionList, ExpressionWrapper, F, Func, OrderBy,
OuterRef, Random, RawSQL, Ref, Subquery, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
UUID, UUIDPK, Company, Employee, Experiment, Number, Result, SimulationRun,
Time,
)
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.example_inc = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
cls.foobar_ltd = Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
cls.gmbh = Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(
F('salaries') + F('num_employees'),
output_field=models.IntegerField()
),
)
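        # 2395 == (10 + 2300) + (20 + 3) + (30 + 32) for the three fixture companies.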
self.assertEqual(companies['result'], 2395)
def test_annotate_values_filter(self):
companies = Company.objects.annotate(
foo=RawSQL('%s', ['value']),
).filter(foo='value').order_by('name')
self.assertQuerysetEqual(
companies, [
'<Company: Example Inc.>',
'<Company: Foobar Ltd.>',
'<Company: Test GmbH>',
],
)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertSequenceEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertSequenceEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertSequenceEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
)
def test_order_of_operations(self):
# Law of order of operations is followed
        self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
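        # e.g. Example Inc.: 2300 + 2 * 2300 = 6900 chairs, since * binds before +.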
self.assertSequenceEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
)
def test_parenthesis_priority(self):
# Law of order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
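        # e.g. Example Inc.: (2300 + 2) * 2300 = 5294600 chairs.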
self.assertSequenceEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: str(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
with self.assertRaisesMessage(FieldError, 'Aggregate functions are not allowed in this query'):
Company.objects.create(
name='Company', num_employees=Max(Value(1)), num_chairs=1,
ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
with self.assertRaisesMessage(ValueError, msg):
test()
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
test_gmbh.save()
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
'expressions.Company.num_employees. F() expressions can only be '
'used to update, not to insert.'
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F('name'))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
'expressions can only be used to update, not to insert.'
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertSequenceEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2, e3] if connection.features.has_case_insensitive_like else [e2]
)
qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk')
self.assertSequenceEqual(qs, [e2, e3])
def test_ticket_18375_join_reuse(self):
# Reverse multijoin F() references and the lookup target the same join.
# Pre #18375 the F() join was generated first and the lookup couldn't
# reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() reused the join generated by the gte
        # lookup; if the F() was seen first, it generated a join the other
        # lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another F() case similar to the one above: the same join appears in
        # two filter kwargs, once in the lhs lookup and once inside F(). Before
        # #18375 the number of joins generated was random when dict
        # randomization was enabled, i.e. the generated query depended on
        # which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_outerref(self):
inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
msg = (
'This queryset contains a reference to an outer query and may only '
'be used in a subquery.'
)
with self.assertRaisesMessage(ValueError, msg):
inner.exists()
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
self.assertIs(outer.exists(), True)
def test_subquery(self):
Company.objects.filter(name='Example Inc.').update(
point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'),
ceo=Employee.objects.get(firstname='Max', lastname='Mustermann'),
)
Employee.objects.create(firstname='Bob', lastname='Brown', salary=40)
qs = Employee.objects.annotate(
is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
largest_company=Subquery(Company.objects.order_by('-num_employees').filter(
models.Q(ceo=OuterRef('pk')) | models.Q(point_of_contact=OuterRef('pk'))
).values('name')[:1], output_field=models.CharField())
).values(
'firstname',
'is_point_of_contact',
'is_not_point_of_contact',
'is_ceo_of_small_company',
'is_ceo_small_2',
'largest_company',
).order_by('firstname')
results = list(qs)
# Could use Coalesce(subq, Value('')) instead except for the bug in
# cx_Oracle mentioned in #23843.
bob = results[0]
if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls:
bob['largest_company'] = None
self.assertEqual(results, [
{
'firstname': 'Bob',
'is_point_of_contact': False,
'is_not_point_of_contact': True,
'is_ceo_of_small_company': False,
'is_ceo_small_2': False,
'largest_company': None,
},
{
'firstname': 'Frank',
'is_point_of_contact': False,
'is_not_point_of_contact': True,
'is_ceo_of_small_company': True,
'is_ceo_small_2': True,
'largest_company': 'Foobar Ltd.',
},
{
'firstname': 'Joe',
'is_point_of_contact': True,
'is_not_point_of_contact': False,
'is_ceo_of_small_company': False,
'is_ceo_small_2': False,
'largest_company': 'Example Inc.',
},
{
'firstname': 'Max',
'is_point_of_contact': False,
'is_not_point_of_contact': True,
'is_ceo_of_small_company': True,
'is_ceo_small_2': True,
'largest_company': 'Example Inc.'
}
])
# A less elegant way to write the same query: this uses a LEFT OUTER
# JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
# efficient than EXISTS.
self.assertCountEqual(
qs.filter(is_point_of_contact=True).values('pk'),
Employee.objects.exclude(company_point_of_contact_set=None).values('pk')
)
def test_in_subquery(self):
# This is a contrived test (and you really wouldn't write this query),
# but it is a succinct way to test the __in=Subquery() construct.
small_companies = Company.objects.filter(num_employees__lt=200).values('pk')
subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3)))
self.assertCountEqual(subquery_test2, [self.foobar_ltd])
def test_uuid_pk_subquery(self):
u = UUIDPK.objects.create()
UUID.objects.create(uuid_fk=u)
qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id')))
self.assertCountEqual(qs, [u])
def test_nested_subquery(self):
inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
contrived = Employee.objects.annotate(
is_point_of_contact=Subquery(
outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'),
output_field=models.BooleanField(),
),
)
self.assertCountEqual(contrived.values_list(), outer.values_list())
def test_nested_subquery_outer_ref_2(self):
first = Time.objects.create(time='09:00')
second = Time.objects.create(time='17:00')
third = Time.objects.create(time='21:00')
SimulationRun.objects.bulk_create([
SimulationRun(start=first, end=second, midpoint='12:00'),
SimulationRun(start=first, end=third, midpoint='15:00'),
SimulationRun(start=second, end=first, midpoint='00:00'),
])
inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time')
middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1]
outer = Time.objects.annotate(other=Subquery(middle, output_field=models.TimeField()))
# This is a contrived example. It exercises the double OuterRef form.
self.assertCountEqual(outer, [first, second, third])
def test_annotations_within_subquery(self):
Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank'))
inner = Company.objects.filter(
ceo=OuterRef('pk')
).values('ceo').annotate(total_employees=models.Sum('num_employees')).values('total_employees')
outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner))
self.assertSequenceEqual(
outer.order_by('-total_employees').values('salary', 'total_employees'),
[{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}],
)
def test_subquery_references_joined_table_twice(self):
inner = Company.objects.filter(
num_chairs__gte=OuterRef('ceo__salary'),
num_employees__gte=OuterRef('point_of_contact__salary'),
)
# Another contrived example (there is no need to have a subquery here)
outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
self.assertFalse(outer.exists())
def test_explicit_output_field(self):
class FuncA(Func):
output_field = models.CharField()
class FuncB(Func):
pass
expr = FuncB(FuncA())
self.assertEqual(expr.output_field, FuncA.output_field)
class IterableLookupInnerExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30)
# MySQL requires that the values calculated for expressions don't pass
# outside of the field's range, so it's inconvenient to use the values
# in the more general tests.
Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo)
Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo)
Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo)
Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo)
Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10]))
self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False)
self.assertQuerysetEqual(
Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])),
['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'],
ordered=False
)
self.assertQuerysetEqual(
Company.objects.filter(
num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10])
),
['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
ordered=False
)
def test_expressions_in_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')])
self.assertQuerysetEqual(
queryset,
['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'],
ordered=False
)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')])
self.assertQuerysetEqual(queryset, [], ordered=False)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
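# Note: the assertions above inspect query.alias_map to check the join type
# chosen by the ORM -- filter() with F('start__time')/F('end__time') uses
# INNER joins, while the equivalent exclude() keeps them LEFT OUTER.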
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertQuerysetEqual(
Company.objects.filter(num_employees__range=(F('num_chairs'), 100)),
['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'],
ordered=False
)
self.assertQuerysetEqual(
Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)),
['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
ordered=False
)
self.assertQuerysetEqual(
Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)),
['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
ordered=False
)
self.assertQuerysetEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[
'<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>',
'<Company: 5060 Ltd>', '<Company: 99300 Ltd>',
],
ordered=False
)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This defensive test only works on databases that don't validate parameter types")
def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1'])
self.assertQuerysetEqual(queryset, [], ordered=False)
def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name='Integrity testing',
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name='Taste testing',
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
within_experiment_time = [F('experiment__start'), F('experiment__end')]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"])
within_experiment_time = [F('experiment__start'), F('experiment__end')]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"])
class FTests(SimpleTestCase):
def test_deepcopy(self):
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_deconstruct(self):
f = F('name')
path, args, kwargs = f.deconstruct()
self.assertEqual(path, 'django.db.models.expressions.F')
self.assertEqual(args, (f.name,))
self.assertEqual(kwargs, {})
def test_equal(self):
f = F('name')
same_f = F('name')
other_f = F('username')
self.assertEqual(f, same_f)
self.assertNotEqual(f, other_f)
def test_hash(self):
d = {F('name'): 'Bob'}
self.assertIn(F('name'), d)
self.assertEqual(d[F('name')], 'Bob')
def test_not_equal_Value(self):
f = F('name')
value = Value('name')
self.assertNotEqual(f, value)
self.assertNotEqual(value, f)
class ExpressionsTests(TestCase):
def test_F_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
Special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a pattern lookup with an expression --
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
r"""
Special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a case-insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n = Number.objects.create(integer=42, float=15.5)
cls.n1 = Number.objects.create(integer=-42, float=-15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_left_shift_operator(self):
Number.objects.update(integer=F('integer').bitleftshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
def test_lefthand_bitwise_right_shift_operator(self):
Number.objects.update(integer=F('integer').bitrightshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
delta5 = datetime.timedelta(days=90)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(
name='e0', assigned=sday, start=stime, end=end,
completed=end.date(), estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight))
cls.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite.
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name='e1', assigned=sday, start=stime + delay, end=end,
completed=end.date(), estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name='e2', assigned=sday - datetime.timedelta(3), start=stime,
end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name='e3', assigned=sday, start=stime + delay, end=end,
completed=end.date(), estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name='e4', assigned=sday - datetime.timedelta(10), start=stime,
end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
# e5: started a month after assignment, very long duration
delay = datetime.timedelta(30)
end = stime + delay + delta5
e5 = Experiment.objects.create(
name='e5', assigned=sday, start=stime + delay, end=end,
completed=end.date(), estimated_time=delta5,
)
cls.deltas.append(delta5)
cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
cls.days_long.append(e5.completed - e5.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1))
]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [
e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))
]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=ExpressionWrapper(
F('completed') - F('assigned'), output_field=models.DurationField()
)
)
at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))}
self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'})
at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))}
self.assertEqual(at_least_120_days, {'e5'})
less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))}
self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'})
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_time_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
queryset = Time.objects.annotate(
difference=ExpressionWrapper(
F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()),
output_field=models.DurationField(),
)
)
self.assertEqual(
queryset.get().difference,
datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345)
)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_datetime_subtraction(self):
under_estimate = [
e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))
]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [
e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))
]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
# Exclude e1, which has very high precision, so we can test this on all
# backends regardless of whether or not they support microsecond
# precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name)
def test_date_minus_duration(self):
more_than_4_days = Experiment.objects.filter(
assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=models.DurationField())
)
self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name)
def test_negative_timedelta_update(self):
# subtract 30 seconds, 30 minutes, 2 hours and 2 days
experiments = Experiment.objects.filter(name='e0').annotate(
start_sub_seconds=F('start') + datetime.timedelta(seconds=-30),
).annotate(
start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30),
).annotate(
start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2),
).annotate(
new_start=F('start_sub_hours') + datetime.timedelta(days=-2),
)
expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
# subtract 30 microseconds
experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30))
expected_start += datetime.timedelta(microseconds=+746970)
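# 746970 microseconds = the 747000 microseconds of ``stime`` (dropped when
# expected_start was constructed above) minus the 30 microseconds subtracted
# from ``new_start``.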
experiments.update(start=F('new_start'))
e0 = Experiment.objects.get(name='e0')
self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
def test_deconstruct(self):
value = Value('name')
path, args, kwargs = value.deconstruct()
self.assertEqual(path, 'django.db.models.expressions.Value')
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value('name', output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, 'django.db.models.expressions.Value')
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct())
def test_equal(self):
value = Value('name')
same_value = Value('name')
other_value = Value('username')
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
def test_hash(self):
d = {Value('name'): 'Bob'}
self.assertIn(Value('name'), d)
self.assertEqual(d[Value('name')], 'Bob')
def test_equal_output_field(self):
value = Value('name', output_field=CharField())
same_value = Value('name', output_field=CharField())
other_value = Value('name', output_field=TimeField())
no_output_field = Value('name')
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_raise_empty_expressionlist(self):
msg = 'ExpressionList requires at least one expression'
with self.assertRaisesMessage(ValueError, msg):
ExpressionList()
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(
repr(When(Q(age__gte=18), then=Value('legal'))),
"<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
self.assertEqual(
repr(ExpressionList(F('col'), F('anothercol'))),
'ExpressionList(F(col), F(anothercol))'
)
self.assertEqual(
repr(ExpressionList(OrderBy(F('col'), descending=False))),
'ExpressionList(OrderBy(F(col), descending=False))'
)
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Count('*')), "Count('*', distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
def test_filtered_aggregates(self):
filter = Q(a=1)
self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))")
self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), distinct=False, filter=(AND: ('a', 1)))")
self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))")
self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))")
self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)")
self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))")
self.assertEqual(
repr(Variance('a', sample=True, filter=filter)),
"Variance(F(a), filter=(AND: ('a', 1)), sample=True)"
)
|
|
#!/usr/bin/env python
"""Microbenchmark for Python's sequence unpacking."""
# Python imports
import optparse
import time
# Local imports
import util
def do_unpacking(iterations, to_unpack):
times = []
for _ in xrange(iterations):
t0 = time.time()
# 400 unpackings
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
t1 = time.time()
times.append(t1 - t0)
return times
def test_tuple_unpacking(iterations):
x = tuple(range(10))
return do_unpacking(iterations, x)
def test_list_unpacking(iterations):
x = range(10)
return do_unpacking(iterations, x)
def test_all(iterations):
tuple_data = test_tuple_unpacking(iterations)
list_data = test_list_unpacking(iterations)
return [x + y for (x, y) in zip(tuple_data, list_data)]
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [options] [test]",
description=("Test the performance of sequence unpacking."))
util.add_standard_options_to(parser)
options, args = parser.parse_args()
tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}
if len(args) > 1:
parser.error("Can only specify one test")
elif len(args) == 1:
func = tests.get(args[0])
if func is None:
parser.error("Invalid test name")
util.run_benchmark(options, options.num_runs, func)
else:
util.run_benchmark(options, options.num_runs, test_all)
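# Example invocation (a sketch; the script name and the ``-n``/``--num_runs``
# option provided by the companion ``util`` module are assumptions):
#
#     python unpack_sequence.py -n 5 tuple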
|
|
from __future__ import print_function, division
from sympy.core import C, Add, Mul, Pow, S
from sympy.core.compatibility import default_sort_key, string_types
from sympy.core.mul import _keep_coeff
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import precedence
from sympy.core.sympify import _sympify
class AssignmentError(Exception):
"""
Raised if an assignment variable for a loop is missing.
"""
pass
class Assignment(C.Relational):
"""
Represents variable assignment for code generation.
Parameters
----------
lhs : Expr
Sympy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
Sympy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
--------
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.printing.codeprinter import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
x := y
>>> Assignment(x, 0)
x := 0
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
A := Matrix([[x, y, z]])
>>> Assignment(A[0, 1], x)
A[0, 1] := x
"""
rel_op = ':='
__slots__ = []
def __new__(cls, lhs, rhs=0, **assumptions):
lhs = _sympify(lhs)
rhs = _sympify(rhs)
# Tuple of things that can be on the lhs of an assignment
assignable = (C.Symbol, C.MatrixSymbol, C.MatrixElement, C.Indexed)
if not isinstance(lhs, assignable):
raise TypeError("Cannot assign to lhs of type %s." % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, C.Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, C.Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError("Cannot assign a scalar to a matrix.")
elif lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs don't align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError("Cannot assign a matrix to a scalar.")
return C.Relational.__new__(cls, lhs, rhs, **assumptions)
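# Shape-validation sketch for the checks in __new__ above: assigning a 1x3
# Matrix to a plain Symbol raises ValueError("Cannot assign a matrix to a
# scalar."), while assigning it to a 1x3 MatrixSymbol is accepted.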
class CodePrinter(StrPrinter):
"""
The base class for code-printing subclasses.
"""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
----------
expr : Expression
The expression to be printed.
assign_to : Symbol, MatrixSymbol, or string (optional)
If provided, the printed code will set the expression to a
variable with name ``assign_to``.
"""
if isinstance(assign_to, string_types):
assign_to = C.Symbol(assign_to)
elif not isinstance(assign_to, (C.Basic, type(None))):
raise TypeError("{0} cannot assign to object of type {1}".format(
type(self).__name__, type(assign_to)))
if assign_to:
expr = Assignment(assign_to, expr)
else:
expr = _sympify(expr)
# keep a set of expressions that are not strictly translatable to code
# and number constants that must be declared and initialized
self._not_supported = set()
self._number_symbols = set()
lines = self._print(expr).splitlines()
# format the output
if self._settings["human"]:
frontlines = []
if len(self._not_supported) > 0:
frontlines.append(self._get_comment(
"Not supported in {0}:".format(self.language)))
for expr in sorted(self._not_supported, key=str):
frontlines.append(self._get_comment(type(expr).__name__))
for name, value in sorted(self._number_symbols, key=str):
frontlines.append(self._declare_number_const(name, value))
lines = frontlines + lines
lines = self._format_code(lines)
result = "\n".join(lines)
else:
lines = self._format_code(lines)
result = (self._number_symbols, self._not_supported,
"\n".join(lines))
del self._not_supported
del self._number_symbols
return result
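# Minimal usage sketch of doprint() through a concrete subclass (this assumes
# the C printer exposed as sympy.ccode, which builds on CodePrinter; the exact
# output string is illustrative):
#
#     >>> from sympy import ccode, symbols
#     >>> x, y = symbols('x y')
#     >>> ccode(x**2 + y, assign_to='z')
#     'z = pow(x, 2) + y;'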
def _doprint_loops(self, expr, assign_to=None):
# Here we print an expression that contains Indexed objects; they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements and possibly storing results in
# temporary variables or accumulating them in the assign_to object.
if self._settings.get('contract', True):
from sympy.tensor import get_contraction_structure
# Setup loops over non-dummy indices -- all terms need these
indices = self._get_expression_indices(expr, assign_to)
# Setup loops over dummy indices -- each term needs separate treatment
dummies = get_contraction_structure(expr)
else:
indices = []
dummies = {None: (expr,)}
openloop, closeloop = self._get_loop_opening_ending(indices)
# terms with no summations first
if None in dummies:
text = StrPrinter.doprint(self, Add(*dummies[None]))
else:
# If all terms have summations we must initialize array to Zero
text = StrPrinter.doprint(self, 0)
# skip redundant assignments (where lhs == rhs)
lhs_printed = self._print(assign_to)
lines = []
if text != lhs_printed:
lines.extend(openloop)
if assign_to is not None:
text = self._get_statement("%s = %s" % (lhs_printed, text))
lines.append(text)
lines.extend(closeloop)
# then terms with summations
for d in dummies:
if isinstance(d, tuple):
indices = self._sort_optimized(d, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in dummies[d]:
if term in dummies and not ([list(f.keys()) for f in dummies[term]]
== [[None] for f in dummies[term]]):
# If one factor in the term has its own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
"FIXME: no support for contractions in factor yet")
else:
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
if assign_to is None:
raise AssignmentError(
"need assignment variable for loops")
if term.has(assign_to):
raise ValueError("FIXME: lhs present in rhs,\
this is undefined in CodePrinter")
lines.extend(openloop)
lines.extend(openloop_d)
text = "%s = %s" % (lhs_printed, StrPrinter.doprint(
self, assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return "\n".join(lines)
def _get_expression_indices(self, expr, assign_to):
from sympy.tensor import get_indices
rinds, junk = get_indices(expr)
linds, junk = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError("lhs indices must match non-dummy"
" rhs indices in %s" % expr)
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
if not indices:
return []
# determine optimized loop order by giving a score to each index;
# the index with the highest score is put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(C.Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _rate_index_position(self, p):
"""function to calculate score based on position among indices
This method is used to sort loops in an optimized order, see
CodePrinter._sort_optimized()
"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc..."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists
of codelines"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _print_Assignment(self, expr):
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if isinstance(expr.rhs, C.Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = C.Piecewise(*zip(expressions, conditions))
return self._print(temp)
elif isinstance(lhs, C.MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return "\n".join(lines)
elif self._settings["contract"] and (lhs.has(C.IndexedBase) or
rhs.has(C.IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(*expr.args):
break
if func is not None:
return "%s(%s)" % (func, self.stringify(expr.args, ", "))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, C.Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings["precision"]))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return "%s_%i" % (expr.name, expr.dummy_index) # Dummy
_print_Catalan = _print_NumberSymbol
_print_EulerGamma = _print_NumberSymbol
_print_GoldenRatio = _print_NumberSymbol
_print_Exp1 = _print_NumberSymbol
_print_Pi = _print_NumberSymbol
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + "/" + b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
self._not_supported.add(expr)
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_Matrix = _print_not_supported
_print_ImmutableMatrix = _print_not_supported
_print_MutableDenseMatrix = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_DeferredVector = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_PDF = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_Sample = _print_not_supported
_print_SparseMatrix = _print_not_supported
_print_tuple = _print_not_supported
_print_Uniform = _print_not_supported
_print_Unit = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
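# Illustrative sketch (not part of the original module): a minimal subclass wiring up
# the NotImplementedError hooks documented above.  The C-like output syntax, the
# scoring rule and the class name are assumptions chosen for the example only; they
# do not correspond to any particular concrete printer.
class ToySubclassPrinter(CodePrinter):
    def _rate_index_position(self, p):
        # later (right-most) index positions score higher, so they end up in the
        # innermost loop (see _sort_optimized above)
        return p * 5

    def _get_statement(self, codestring):
        return "%s;" % codestring

    def _get_comment(self, text):
        return "// %s" % text

    def _declare_number_const(self, name, value):
        return "double const %s = %s;" % (name, value)

    def _format_code(self, lines):
        # no indentation or line wrapping in this sketch
        return lines

    def _get_loop_opening_ending(self, indices):
        # one counted loop per index, using the Idx bounds
        open_lines = []
        close_lines = []
        for i in indices:
            var = self._print(i.label)
            open_lines.append("for (int %s=%s; %s<%s; %s++){" % (
                var, self._print(i.lower), var, self._print(i.upper + 1), var))
            close_lines.append("}")
        return open_lines, close_lines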
|
|
"""
This is originally from:
https://github.com/mahendra/kafka-python/blob/zookeeper/kafka/zookeeper.py
It is modified in a few places to work with more recent KafkaClient.
Also, threading is substituted for multiprocessing, since threading
is gevent friendly whereas multiprocessing is not.
"""
import logging
import threading
import os
import random
import socket
import uuid
import json
from functools import partial
from kafka.client import KafkaClient
from kafka.producer import SimpleProducer, KeyedProducer
from kafka.consumer import SimpleConsumer
from kazoo.exceptions import SessionExpiredError
from kazoo.client import KazooClient
import time
import sys
if 'gevent' in sys.modules:
from kazoo.handlers.gevent import SequentialGeventHandler as kazoo_handler
else:
from kazoo.handlers.threading import SequentialThreadingHandler as kazoo_handler
BROKER_IDS_PATH = 'brokers/ids/' # Path where kafka stores broker info
PARTITIONER_PATH = 'python/kafka/' # Path to use for consumer co-ordination
DEFAULT_TIME_BOUNDARY = 10
# how many attempts to create a valid partition
MAX_PARTITION_ALLOCATION_ATTEMPTS = 100
# how much time to wait to create a valid partition
MAX_PARTITION_ALLOCATION_TIME = 120 # in seconds
# Allocation states
ALLOCATION_COMPLETED = -1
ALLOCATION_CHANGING = -2
ALLOCATION_FAILED = -3
ALLOCATION_MISSED = -4
ALLOCATION_INACTIVE = -5
log = logging.getLogger("kafka")
random.seed()
def _get_brokers(zkclient, chroot='/'):
"""
Get the list of available brokers registered in zookeeper
"""
brokers = []
root = os.path.join(chroot, BROKER_IDS_PATH)
for broker_id in zkclient.get_children(root):
path = os.path.join(root, broker_id)
info, _ = zkclient.get(path)
info = json.loads(info)
brokers.append((info['host'], info['port']))
log.debug("List of brokers fetched: " + str(brokers))
random.shuffle(brokers)
return brokers
def get_client(zkclient, chroot='/'):
"""
Given a zookeeper client, return a KafkaClient instance for use
"""
brokers = _get_brokers(zkclient, chroot=chroot)
brokers = ["%s:%s" % (host, port) for (host, port) in brokers]
return KafkaClient(brokers)
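# Illustrative sketch (not part of the original module): how get_client() is meant to
# be used -- the same start/stop dance that ZProducer.__init__ below performs.  The
# ZooKeeper address is an assumption for demonstration only.
def _example_bootstrap_client(hosts='127.0.0.1:2181', chroot='/'):
    zkclient = KazooClient(hosts=hosts, handler=kazoo_handler())
    zkclient.start()
    try:
        return get_client(zkclient, chroot=chroot)
    finally:
        zkclient.stop()
        zkclient.close()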
# TODO: Make this a subclass of Producer later
class ZProducer(object):
"""
A base Zookeeper producer to be used by other producer classes
Args
hosts: Comma-separated list of hosts to connect to
(e.g. 127.0.0.1:2181,127.0.0.1:2182)
topic - The kafka topic to send messages to
chroot - The kafka subdirectory to search for brokers
"""
producer_kls = None
def __init__(self, hosts, topic, chroot='/', **kwargs):
if self.producer_kls is None:
raise NotImplementedError("Producer class needs to be mentioned")
self.zkclient = KazooClient(hosts=hosts)
self.zkclient.start()
# Start the producer instance
self.client = get_client(self.zkclient, chroot=chroot)
self.producer = self.producer_kls(self.client, topic, **kwargs)
# Stop Zookeeper
self.zkclient.stop()
self.zkclient.close()
self.zkclient = None
def stop(self):
self.producer.stop()
self.client.close()
class ZSimpleProducer(ZProducer):
"""
A simple, round-robin producer. Each message goes to exactly one partition
Args:
hosts: Comma-separated list of hosts to connect to
(e.g. 127.0.0.1:2181,127.0.0.1:2182)
topic - The kafka topic to send messages to
"""
producer_kls = SimpleProducer
def send_messages(self, *msg):
self.producer.send_messages(*msg)
class ZKeyedProducer(ZProducer):
"""
A producer which distributes messages to partitions based on a
partitioner function (class) and the key
hosts: Comma-separated list of hosts to connect to
(e.g. 127.0.0.1:2181,127.0.0.1:2182)
topic - The kafka topic to send messages to
partitioner - A partitioner class that will be used to get the partition
to send the message to. Must be derived from Partitioner
"""
producer_kls = KeyedProducer
def send(self, key, msg):
self.producer.send(key, msg)
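# Illustrative usage sketch (not part of the original module).  The ZooKeeper host
# list and topic name are assumptions for demonstration only.
def _example_produce(hosts='127.0.0.1:2181,127.0.0.1:2182', topic='demo-topic'):
    producer = ZSimpleProducer(hosts, topic)
    try:
        producer.send_messages('hello', 'world')
    finally:
        producer.stop()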
class DefaultZSimpleConsumerException(Exception):
pass
class ZSimpleConsumer(object):
"""
A consumer that uses Zookeeper to co-ordinate and share the partitions
of a topic with other consumers
hosts: Comma-separated list of hosts to connect to
(e.g. 127.0.0.1:2181,127.0.0.1:2182)
group: a name for this consumer, used for offset storage and must be unique
topic: the topic to consume
chroot - The kafka subdirectory to search for brokers
driver_type: The driver type to use for the consumer
block_init: If True, the init method will block till the allocation is
completed. If not, it will return immediately and user can invoke
consumer.status() to check the status. Default True.
time_boundary: The time interval, in seconds, to wait out before deciding
on consumer changes in zookeeper. A higher value will ensure that a
consumer restart will not cause two re-balances.
(Default 10s)
ignore_non_allocation: If set to True, the consumer will ignore the
case where no partitions were allocated to it.
This can be used to keep consumers in stand-by. They will take over
when another consumer fails. (Default False)
auto_commit: default True. Whether or not to auto commit the offsets
auto_commit_every_n: default 100. How many messages to consume
before a commit
auto_commit_every_t: default 5000. How much time (in milliseconds) to
wait before commit
Auto commit details:
If both auto_commit_every_n and auto_commit_every_t are set, they will
reset one another when one is triggered. These triggers simply call the
commit method on this class. A manual call to commit will also reset
these triggers
Partition allocation details
* When the consumer is initialized, it blocks till it gets an allocation
* If ignore_non_allocation is False, the consumer will throw an error
in init or during other operations
* During re-balancing of partitions, the consumer will not return any
messages (iteration or get_messages)
* After re-balancing, if the consumer does not get any partitions,
ignore_non_allocation will control its behaviour
"""
def __init__(self,
hosts,
group,
topic,
chroot='/',
block_init=True,
time_boundary=DEFAULT_TIME_BOUNDARY,
ignore_non_allocation=False,
**kwargs):
# User is not allowed to specify partitions
if 'partitions' in kwargs:
raise ValueError("Partitions cannot be specified")
self.ignore_non_allocation = ignore_non_allocation
self.time_boundary = time_boundary
self.zkclient = KazooClient(hosts, handler=kazoo_handler())
self.zkclient.start()
self.client = get_client(self.zkclient, chroot=chroot)
self.client.load_metadata_for_topics(topic)
self.partitions = set(self.client.topic_partitions[topic])
# self.allocated = [ALLOCATION_CHANGING] * len(partitions)
self.path = os.path.join(chroot, PARTITIONER_PATH, topic, group)
log.debug("Using path %s for co-ordination" % self.path)
# Create a function which can be used for creating consumers
self.consumer = None
self.consumer_fact = partial(SimpleConsumer,
self.client,
group,
topic,
**kwargs)
self.consumer_topic = topic
self.consumer_group = group
# Keep monitoring for changes
# Design:
# * We will have a worker which will keep monitoring for rebalance
# * The worker and main consumer will share data via shared memory
# protected by a lock
# * If the worker gets new allocations, it will SET an Event()
# * The main consumer will check this event to change itself
# * Main consumer will SET another Event() to indicate worker to exit
# This event will notify the worker to exit
self.exit = threading.Event()
# Used by the worker to indicate that allocation has changed
self.changed = threading.Event()
# The shared memory and lock used for sharing allocation info
self.lock = threading.Lock()
# Initialize the array
# self._set_partitions(self.allocated, [], ALLOCATION_CHANGING)
self.consumer_state = ALLOCATION_CHANGING
# create consumer id
hostname = socket.gethostname()
self.identifier = "%s-%s-%s-%s-%s" % (topic,
group,
hostname,
os.getpid(),
uuid.uuid4().hex)
log.info("Consumer id set to: %s" % self.identifier)
self.got_error = False
self.error = DefaultZSimpleConsumerException()
# Start the worker
self.partitioner_thread = threading.Thread(target=self._check_and_allocate)
self.partitioner_thread.daemon = True
self.partitioner_thread.start()
self.on_stop_callback = None
def status(self):
"""
Returns the status of the consumer
"""
self._set_consumer(block=False)
if self.consumer_state == ALLOCATION_COMPLETED:
return 'ALLOCATED'
elif self.consumer_state == ALLOCATION_CHANGING:
return 'ALLOCATING'
elif self.consumer_state == ALLOCATION_FAILED:
return 'FAILED'
elif self.consumer_state == ALLOCATION_MISSED:
return 'MISSED'
elif self.consumer_state == ALLOCATION_INACTIVE:
return 'INACTIVE'
def _get_new_partitioner(self):
return self.zkclient.SetPartitioner(path=self.path,
set=self.partitions,
identifier=self.identifier,
time_boundary=self.time_boundary)
def _check_and_allocate(self):
"""
Checks if a new allocation is needed for the partitions.
If so, co-ordinates with Zookeeper to get a set of partitions
allocated for the consumer
"""
old = None
# Set up the partitioner
partitioner = self._get_new_partitioner()
# Once allocation is done, sleep for some time between checks
sleep_time = self.time_boundary / 2.0
partition_allocation_attempt = 0
partition_allocation_start_time = time.time()
# Keep running the allocation logic till we are asked to exit
while not self.exit.is_set():
log.info("ZK Partitioner state: %s, topic: %s, group: %s" % (partitioner.state, self.consumer_topic, self.consumer_group))
try:
if partitioner.acquired:
# A new set of partitions has been acquired
new = list(partitioner)
# If there is a change, notify for a consumer change
if new != old:
log.info("Acquired partitions: %s" % str(new))
if len(new) > 0:
self.consumer = self.consumer_fact(partitions=new)
self.consumer.register_on_stop_callback(self.on_stop_callback)
else:
self.consumer = None
old = new
# Wait for a while before checking again. In the meantime
# wake up if the user calls for exit
self.exit.wait(sleep_time)
elif partitioner.release:
# We have been asked to release the partitions
log.info("Releasing partitions for reallocation")
old = None
if self.consumer is not None:
self.consumer.stop()
partitioner.release_set()
elif partitioner.failed:
# Partition allocation failed
# Failure means we need to create a new SetPartitioner:
# see: http://kazoo.readthedocs.org/en/latest/api/recipe/partitioner.html
log.error("Partitioner Failed. Creating new partitioner.")
partitioner = self._get_new_partitioner()
elif partitioner.allocating:
# We have to wait till the partition is allocated
partition_allocation_attempt += 1
partition_allocation_end_time = time.time()
log.info("Waiting for partition allocation, topic: {0}, group: {1}, count: {2}, time: {3}".format(
self.consumer_topic,
self.consumer_group,
partition_allocation_attempt,
partition_allocation_end_time - partition_allocation_start_time))
if partition_allocation_attempt > MAX_PARTITION_ALLOCATION_ATTEMPTS or \
partition_allocation_end_time - partition_allocation_start_time > MAX_PARTITION_ALLOCATION_TIME:
# we are probably spinning in a loop waiting for allocation
# reset the partitioner
# this is the fix put in that solved the issue in QA when there
# were multiple demuxers. TODO: unclear why it did not happen with
# chronos / picasso which also use zookeeper client and have multiple
# instances. (Maybe just luck?)
if self.consumer is not None:
self.consumer.stop()
partitioner.release_set()
old = None
# cleanup old one
partitioner.finish()
# create new one
partitioner = self._get_new_partitioner()
log.info('Creating new partitioner, as the old one was in ALLOCATING state for too many attempts, topic: {0}, group: {1}, count: {2}, time: {3}'.format(
self.consumer_topic,
self.consumer_group,
partition_allocation_attempt,
partition_allocation_end_time - partition_allocation_start_time))
partition_allocation_attempt = 0
partition_allocation_start_time = time.time()
partitioner.wait_for_acquire(timeout=1)
except SessionExpiredError as e:
log.error("Zookeeper session expired. Error:%s" % e)
self.error = e
self.got_error = True
break
except Exception as e:
log.error("Exception raised in partitioner thread. Error:%s" % e)
self.error = e
self.got_error = True
break
# Clean up
partitioner.finish()
def __iter__(self):
"""
Iterate through data available in partitions allocated to this
instance
"""
self._set_consumer(block=False)
if self.consumer is None:
raise RuntimeError("Error in partition allocation")
for msg in self.consumer:
yield msg
self._set_consumer(block=False)
def get_messages(self, count=1, block=True, timeout=0.1):
"""
Fetch the specified number of messages
count: Indicates the maximum number of messages to be fetched
block: If True, the API will block till some messages are fetched.
timeout: If None, and block=True, the API will block infinitely.
If >0, API will block for specified time (in seconds)
"""
if self.got_error:
raise self.error
if self.consumer is None:
# This is needed in cases where gevent is used with
# a thread that does not have any calls that would yield.
# If we do not sleep here a greenlet could spin indefinitely.
time.sleep(0)
return []
return self.consumer.get_messages(count, block, timeout)
def stop(self):
self.exit.set()
self.partitioner_thread.join()
self.zkclient.stop()
self.zkclient.close()
self.client.close()
def commit(self):
if self.consumer:
self.consumer.commit()
def commit_offsets(self, offsets):
if self.consumer:
self.consumer.commit_offsets(offsets)
def register_on_stop_callback(self, fn):
self.on_stop_callback = fn
def seek(self, *args, **kwargs):
if self.consumer is None:
raise RuntimeError("Error in partition allocation")
elif not self.consumer:
raise RuntimeError("Waiting for partition allocation")
return self.consumer.seek(*args, **kwargs)
def pending(self):
if self.consumer is None:
raise RuntimeError("Error in partition allocation")
elif not self.consumer:
# We are in a transition/suspended state
return 0
return self.consumer.pending()
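# Illustrative usage sketch (not part of the original module).  Host list, group and
# topic are assumptions for demonstration only.
def _example_consume(hosts='127.0.0.1:2181', group='demo-group', topic='demo-topic'):
    consumer = ZSimpleConsumer(hosts, group, topic, ignore_non_allocation=True)
    try:
        # returns [] while partitions are still being allocated (see get_messages above)
        return consumer.get_messages(count=10, block=True, timeout=1.0)
    finally:
        consumer.stop()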
|
|
import os, sys, unittest
import setup_path
from lib.asmlib.linker import *
from lib.asmlib.assembler import *
from lib.asmlib.asm_common_types import *
from lib.commonlib.utils import *
from lib.commonlib.luz_defs import (
USER_MEMORY_START, USER_MEMORY_SIZE)
# Initial offset
IOF = 0x100000
op_call = 0x1D
op_lui = 0x06
op_ori = 0x2A
op_add = 0x0
op_sub = 0x1
class TestLinker(unittest.TestCase):
def setUp(self):
self.asm = Assembler()
self.linker = Linker(IOF)
def assemble(self, txt):
return self.asm.assemble(txt)
def link(self, object_files):
return self.linker.link(object_files)
def test_collect_exports(self):
obj = [0, 0, 0]
obj[0] = self.assemble(r'''
.segment my1
kwa: .word 1
kaw: .word 1
.global kwa
.global kaw
''')
obj[1] = self.assemble(r'''
.segment my1
kaw: .word 1
kwa14: .word 1
.global kwa14
''')
obj[2] = self.assemble(r'''
.segment my1
.word 1
jaxx: .word 1
.global jaxx
.global karma
.global rarma
.segment chipper
karma: .alloc 20
rarma: .alloc 20
''')
self.assertEqual(self.linker._collect_exports(obj),
{
'kaw': (0, SegAddr(segment='my1', offset=4)),
'kwa': (0, SegAddr(segment='my1', offset=0)),
'kwa14': (1, SegAddr(segment='my1', offset=4)),
'jaxx': (2, SegAddr(segment='my1', offset=4)),
'karma': (2, SegAddr(segment='chipper', offset=0)),
'rarma': (2, SegAddr(segment='chipper', offset=20))
})
def test_compute_segment_map(self):
# Basic sanity check
#
obj1 = self.assemble(r'''
.segment joe
add $r0, $r0, $r0
xor $r5, $r5, $r7
''')
obj2 = self.assemble(r'''
.segment moe
.alloc 4
.segment joe
and $r8, $r9, $r1
''')
self.assertEqual(
self.linker._compute_segment_map([obj1, obj2], IOF)[0],
[
{
'joe': IOF,
},
{
'joe': IOF + 8,
'moe': IOF + 12
}
])
# A more convoluted case with 2 files
#
obj1 = self.assemble(r'''
.segment text
add $r1, $r0, $r1
.alloc 11
.segment data
.word 0x1, 0x2, 0x3
''')
obj2 = self.assemble(r'''
.segment junk
.alloc 500
.alloc 500
.segment data
.word 0x90, 0x80, 0x90, 0x80, 0x80
.segment text
add $r1, $r0, $r2
add $r1, $r0, $r2
add $r1, $r0, $r2
add $r1, $r0, $r2
''')
self.assertEqual(
self.linker._compute_segment_map([obj1, obj2], IOF)[0],
[
{
'data': IOF,
'text': IOF + 32 + 1000,
},
{
'data': IOF + 12,
'junk': IOF + 32,
'text': IOF + 32 + 1000 + 16
}
])
def test_patch_segment_data(self):
#
#---- test CALL patch ----
#
obj1 = self.assemble(r'''
.segment junk
add $r1, $r0, $r2
.alloc 4
call bomba
.alloc 8
datum:
.word 50, 60, 70, 80
call datum
''')
seg_data = obj1.seg_data['junk']
saved_seg_data = seg_data[:]
# Perform "import patching"
#
self.linker._patch_segment_data(
seg_data=seg_data,
instr_offset=8,
type=ImportType.CALL,
mapped_address=0x65434)
# make sure the patch is correct
instr = bytes2word(seg_data[8:12])
self.assertEqual(extract_bitfield(instr, 31, 26), op_call)
self.assertEqual(extract_bitfield(instr, 25, 0), 0x65434/4)
# make sure nothing else was changed
self.assertEqual(seg_data[0:8], saved_seg_data[0:8])
self.assertEqual(seg_data[12:], saved_seg_data[12:])
# Now perform "relocation patching" on 'datum'
#
saved_seg_data = seg_data[:]
self.linker._patch_segment_data(
seg_data=seg_data,
instr_offset=36,
type=RelocType.CALL,
mapped_address=0x100000)
instr = bytes2word(seg_data[36:40])
self.assertEqual(extract_bitfield(instr, 31, 26), op_call)
self.assertEqual(extract_bitfield(instr, 25, 0), 0x100000/4+5)
self.assertEqual(seg_data[0:36], saved_seg_data[0:36])
#
#---- test LI patch ----
#
obj2 = self.assemble(r'''
.segment tiexto
add $r1, $r0, $r2
.alloc 8
li $r28, far_symbol
.alloc 8000
datum:
.word 50, 60, 70, 80
li $r20, datum
''')
seg_data = obj2.seg_data['tiexto']
# Perform "import patching"
#
saved_seg_data = seg_data[:]
self.linker._patch_segment_data(
seg_data=seg_data,
instr_offset=12,
type=ImportType.LI,
mapped_address=0xDEADBEEF)
# make sure the patch is correct
lui_instr = bytes2word(seg_data[12:16])
self.assertEqual(extract_bitfield(lui_instr, 31, 26), op_lui)
self.assertEqual(extract_bitfield(lui_instr, 15, 0), 0xDEAD)
ori_instr = bytes2word(seg_data[16:20])
self.assertEqual(extract_bitfield(ori_instr, 31, 26), op_ori)
self.assertEqual(extract_bitfield(ori_instr, 15, 0), 0xBEEF)
# make sure nothing else was changed
self.assertEqual(seg_data[0:12], saved_seg_data[0:12])
self.assertEqual(seg_data[20:], saved_seg_data[20:])
# Perform "relocation patching"
#
saved_seg_data = seg_data[:]
self.linker._patch_segment_data(
seg_data=seg_data,
instr_offset=8036,
type=RelocType.LI,
mapped_address=0xDEADBEEF)
# make sure the patch is correct
lui_instr = bytes2word(seg_data[8036:8040])
self.assertEqual(extract_bitfield(lui_instr, 31, 26), op_lui)
self.assertEqual(extract_bitfield(lui_instr, 15, 0), 0xDEAD)
ori_instr = bytes2word(seg_data[8040:8044])
self.assertEqual(extract_bitfield(ori_instr, 31, 26), op_ori)
self.assertEqual(extract_bitfield(ori_instr, 15, 0), 8020+0xBEEF)
# make sure nothing else was changed
self.assertEqual(seg_data[0:8036], saved_seg_data[0:8036])
self.assertEqual(seg_data[8044:], saved_seg_data[8044:])
def test_resolve_relocations(self):
obj1 = self.assemble(r'''
.segment joe
add $r0, $r0, $r0
add $r0, $r0, $r0
margie:
xor $r5, $r5, $r7
burka:
.word 0x70
.segment moe
call margie
li $r20, burka
''')
segment_map, total_size = self.linker._compute_segment_map([obj1], IOF)
self.assertEqual(segment_map,
[
{
'joe': IOF,
'moe': IOF + 16,
},
])
self.assertEqual(total_size, 28)
moe_data = obj1.seg_data['moe']
# make sure that nominally the instructions are what we
# expect.
#
call_instr = bytes2word(moe_data[0:4])
self.assertEqual(call_instr,
build_bitfield(31, 26, op_call) |
build_bitfield(25, 0, 8 / 4))
lui_instr = bytes2word(moe_data[4:8])
ori_instr = bytes2word(moe_data[8:12])
self.assertEqual(extract_bitfield(lui_instr, 15, 0), 0)
self.assertEqual(extract_bitfield(ori_instr, 15, 0), 12)
# Now resolve the relocation
#
self.linker._resolve_relocations([obj1], segment_map)
# check that the instruction's destination was relocated
# properly
#
call_instr = bytes2word(moe_data[0:4])
self.assertEqual(call_instr,
build_bitfield(31, 26, op_call) |
build_bitfield(25, 0, (IOF + 8) / 4))
lui_instr = bytes2word(moe_data[4:8])
ori_instr = bytes2word(moe_data[8:12])
self.assertEqual(extract_bitfield(lui_instr, 15, 0), 0x10)
self.assertEqual(extract_bitfield(ori_instr, 15, 0), 12)
def test_resolve_imports(self):
obj = [0, 0]
obj[0] = self.assemble(r'''
.segment moe
kaw: .word 1
kwa14: .word 1
call karma
li $r20, jaxx
''')
obj[1] = self.assemble(r'''
.segment my1
.word 1
jaxx: .word 1
.global jaxx
.global karma
.global rarma
.segment chipper
karma: .alloc 20
rarma: .alloc 20
''')
segment_map, total_size = self.linker._compute_segment_map(obj, IOF)
exports = self.linker._collect_exports(obj)
moe_data = obj[0].seg_data['moe']
# make sure that nominally the instructions are what we
# expect.
#
call_instr = bytes2word(moe_data[8:12])
self.assertEqual(extract_bitfield(call_instr, 25, 0), 0)
lui_instr = bytes2word(moe_data[12:16])
ori_instr = bytes2word(moe_data[16:20])
self.assertEqual(extract_bitfield(lui_instr, 15, 0), 0)
self.assertEqual(extract_bitfield(ori_instr, 15, 0), 0)
# Now resolve the imports
#
self.linker._resolve_imports(obj, segment_map, exports)
# check correct resolutions
#
chipper_addr = segment_map[1]['chipper']
call_instr = bytes2word(moe_data[8:12])
self.assertEqual(extract_bitfield(call_instr, 25, 0),
chipper_addr / 4)
my1_addr = segment_map[1]['my1']
lui_instr = bytes2word(moe_data[12:16])
ori_instr = bytes2word(moe_data[16:20])
self.assertEqual(extract_bitfield(lui_instr, 15, 0),
my1_addr >> 16)
self.assertEqual(extract_bitfield(ori_instr, 15, 0),
(my1_addr & 0xFFFF) + 4)
def test_link(self):
obj0 = self.assemble(r'''
.segment moe
.global asm_main
asm_main:
add $r5, $r6, $sp
kaw: .word 1
kwa14: .word 1
li $r20, kaw
''')
linker = Linker(USER_MEMORY_START, USER_MEMORY_SIZE)
image = linker.link([obj0])
sp_ptr = IOF + USER_MEMORY_SIZE - 4
# 12 bytes for __startup (lui, ori, call)
# 20 bytes in segment moe (add, two .word directives, li = lui + ori)
# 4 bytes for __heap
# total: 12 + 20 + 4 = 36
#
self.assertEqual(len(image), 36)
# The initial 'LI' pointing to $sp
#
lui_instr = bytes2word(image[0:4])
self.assertEqual(lui_instr,
build_bitfield(31, 26, op_lui) |
build_bitfield(25, 21, 29) |
build_bitfield(15, 0, sp_ptr >> 16))
ori_instr = bytes2word(image[4:8])
self.assertEqual(ori_instr,
build_bitfield(31, 26, op_ori) |
build_bitfield(25, 21, 29) |
build_bitfield(20, 16, 29) |
build_bitfield(15, 0, sp_ptr & 0xFFFF))
# calling 'asm_main'
# 'moe' will be mapped after __startup, so at 16
call_instr = bytes2word(image[8:12])
self.assertEqual(call_instr,
build_bitfield(31, 26, op_call) |
build_bitfield(25, 0, (IOF + 12) / 4))
# Now the first instruction of 'moe'
#
add_instr = bytes2word(image[12:16])
self.assertEqual(add_instr,
build_bitfield(31, 26, op_add) |
build_bitfield(25, 21, 5) |
build_bitfield(20, 16, 6) |
build_bitfield(15, 11, 29))
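# Illustrative sketch (not part of the original test suite): the CALL encoding the
# assertions above rely on -- the opcode sits in bits 31..26 and the word-aligned
# target (byte address / 4) in bits 25..0.
def _example_call_word(target_byte_addr):
    word = build_bitfield(31, 26, op_call) | build_bitfield(25, 0, target_byte_addr / 4)
    assert extract_bitfield(word, 31, 26) == op_call
    assert extract_bitfield(word, 25, 0) == target_byte_addr / 4
    return word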
class TestLinkerErrors(unittest.TestCase):
def setUp(self):
self.asm = Assembler()
self.linker = Linker(IOF)
def assemble(self, txt):
return self.asm.assemble(txt)
def link(self, object_files):
return self.linker.link(object_files)
def assert_str_contains(self, str, what):
self.failUnless(str.find(what) > -1, '"%s" contains "%s"' % (str, what))
def assert_linker_error(self, objs, msg):
try:
self.link(objs)
except LinkerError:
err = sys.exc_info()[1]
err_msg = str(err)
self.assert_str_contains(err_msg, msg)
else:
self.fail('LinkerError not raised')
def test_collect_exports_errors(self):
obj1 = self.assemble(r'''
.segment text
jaxx: add $r1, $r0, $r1
.global jaxx
''')
obj2 = self.assemble(r'''
.segment data
.global joe
.global jaxx
jaxx:
joe:
''')
self.assert_linker_error([obj1, obj2], 'Duplicated export')
def test_patch_segment_data_errors(self):
obj1 = self.assemble(r'''
.segment junk
add $r1, $r0, $r2
.alloc 4
call bomba
li $r8, 1550505
.alloc 8
datum:
.word 50, 60, 70, 80
call datum
''')
seg_data = obj1.seg_data['junk']
# "import patching" with offset to a wrong instruction
#
try:
self.linker._patch_segment_data(
seg_data=seg_data,
instr_offset=12,
type=ImportType.CALL,
mapped_address=0x65434)
except LinkerError:
err = sys.exc_info()[1]
err_msg = str(err)
self.assert_str_contains(err_msg, 'expected CALL')
else:
self.fail('LinkerError not raised')
def test_link_errors(self):
obj0 = self.assemble(r'''
.segment moe
asm_main:
add $r5, $r6, $sp
kaw: .word 1
kwa14: .word 1
li $r20, kaw
''')
# no global (exported) asm_main, although the label is
# defined
#
self.assert_linker_error([obj0], "import of symbol 'asm_main")
#-----------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
|
#! /usr/bin/env python
##################################################################################################################
# attention.py
#
# Jackie Lee
# [email protected]
#
# Affective Computing Group, MIT Media Laboratory
# Special Thanks to Heymian Wong, Jon Wetzel
# Last modified on Aug. 9, 2011
#
# Requirement:
# MacOSX 10.6.4
# Using OpenCV 2.1
# (you could make your life easier by installing this -
# http://www.cs.colostate.edu/facerec/algorithms/support/OpenCV2.1_rev3291_MacOS10.6.pkg)
#
##################################################################################################################
import sys
import time
import os
import cv #try this first to see if your OpenCV is ok to go.
import socket
import serial
IPADDR = '10.10.100.254'
PORTNUM = 8899
PACKETDATA='220055'.decode('hex')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((IPADDR, PORTNUM))
s.send(PACKETDATA)
s.close()
ser = serial.Serial('/dev/tty.usbmodem1421', 9600) # Establish the connection on a specific port on Arduino
### Face detection constants
#Face movement constants
CAPTURING = 0 ## set 1 to enable saving JPGs into img/
FACE_MIN_SIZE = 70 ## the bigger, the more fps
FACE_MAX_MOVEMENT = 40
FACE_MAX_LIFE = 1
FACE_LR_MOVE_THRESH = 2
FACE_UD_MOVE_THRESH = 1
FACE_LR_STATE_CHANGE_THRESH = 1
FACE_UD_STATE_CHANGE_THRESH = 1
FACE_ALTERNATION_THRESH = 2
FACE_ONE_DIMENSION_THRESH = 2
FACE_STILL_THRESHOLD = 2
FACE_ALTERNATIONS_EXPIRE = 2
globt = 1
#light patch sales things
arrive = 10;
early = 150;
medium = 250;
late = 300;
mood = 0
#Face movement enumeration
OTHER = 0
STILL = 1
LEFT = 2
RIGHT = 3
UP = 4
DOWN = 5
i = 0
m = 0
#Color constant definitions
RED = cv.RGB(255,0,0)
GREEN = cv.RGB (0,220,0)
BLUE = cv.RGB (0,0,255)
YELLOW = cv.RGB(255,255,0);
ORANGE = cv.RGB(255,127,0);
MAGENTA = cv.RGB(255,0,255);
# other constants
scale = 1
cascade = None
storage = cv.CreateMemStorage(0)
cascade_name = "xml/haarcascade_frontalface_alt.xml"
min_size = (FACE_MIN_SIZE,FACE_MIN_SIZE)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING
age = 0
age1 = 0
age2 = 0
age3 = 0
age4 = 0
age5 = 0
ageavg = 0
agerate = 0
metric = 0
cmet = ""
att = 5
trackedFaces = []
IPL_DEPTH_8U = 8
gray = 0
small_img = 0
osName = os.name
fname_temp=""
### end of Face detection constants
### save as JPG for every 2 seconds
def saveAsJPG(img):
global fname_temp
lt = time.localtime(time.time())
if ((lt[5] %2) == 0):
fname = "%04d%02d%02d%02d%02d%02d" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5])
if (fname != fname_temp):
print "frame saved at " + fname
cv.SaveImage("img/"+fname+".jpg",img)
fname_temp = fname
### end save as JPG
########## Face Class #############
class Face:
def __init__(self,age,width,height,xpt, ypt,life, att):
self.age = age;
self.width = width;
self.height = height;
self.xpt = xpt;
self.ypt = ypt;
self.life = life;
self.att = att;
#self.printFace();
self.updateEyes();
self.updateMouth();
self.state = OTHER;
self.lastState = self.state;
self.alternations = 0;
self.faceStill = 0;
self.stills = 0;
self.lefts = 0;
self.rights = 0
self.ups = 0;
self.downs = 0;
def updateFace(self, width, height, xpt, ypt,att):
turnDir = self.getTurnDir(self.xpt, xpt, self.ypt, ypt, self.width, width, self.height, height)
self.updateMoveState(turnDir)
#print turnDir
self.age = self.age + 1;
global age1
age1 = self.age;
self.width = width;
self.height = height;
self.xpt = xpt;
self.ypt = ypt;
self.life = 0;
self.updateEyes();
self.updateMouth();
#self.att=self.age;
def updateEyes(self):
self.eyeTopline = self.ypt + ((self.height*1)/3);
self.eyeBotline = self.ypt + ((self.height*1)/2);
self.eyeLeft1 = (self.xpt + (self.width/5),self.eyeTopline);
self.eyeLeft2 = (self.xpt + ((self.width*3)/8), self.eyeBotline);
self.eyeRight1 = (self.xpt + ((self.width*5)/8),self.eyeTopline);
self.eyeRight2 = (self.xpt + ((self.width*4)/5),self.eyeBotline);
def updateMouth(self):
self.mouthTopline = self.ypt + ((self.height*2)/3);
self.mouthBotline = self.ypt + self.height;
self.mouthTopLeft = (self.xpt + self.width/5, self.mouthTopline);
self.mouthBotRight = (self.xpt + (self.width*4)/5, self.mouthBotline);
def isShaking(self):
if (self.alternations < FACE_ALTERNATION_THRESH):
return False
else:
self.att-=1 #saying no
global mood
mood = self.att
if ((self.state == LEFT) or (self.state == RIGHT)):
return True
else:
return False
def isNodding(self):
if (self.alternations < FACE_ALTERNATION_THRESH):
return False
else:
self.att+=1 #saying yes
global mood
mood = self.att
if ((self.state == UP) or (self.state ==DOWN)):
return True
else:
return False
def isStill(self):
return (self.faceStill < FACE_STILL_THRESHOLD)
def updateMoveState(self, turnDir):
if (turnDir == OTHER):
self.faceStill += 1
self.state = OTHER
elif (turnDir == STILL):
if (self.state != STILL):
self.lastState = self.state
else:
self.faceStill = 0
self.state = STILL
self.stills += 1
if (self.stills > FACE_ALTERNATIONS_EXPIRE):
self.alternations = 0
self.stills = 0
elif (turnDir == RIGHT):
self.faceStill += 1
if (self.state == OTHER):
self.rights += 1
if (self.rights > FACE_LR_STATE_CHANGE_THRESH):
self.state = RIGHT
elif (self.state == RIGHT):
self.rights += 1
elif (self.state == LEFT):
self.rights += 1
if (self.rights > FACE_LR_STATE_CHANGE_THRESH):
self.state = RIGHT;
self.resetNonAltCounts()
self.alternations += 1
elif ((self.state == UP) or (self.state == DOWN)):
self.state = OTHER
self.resetCounts()
elif(self.state == STILL):
if (self.lastState == LEFT):
self.alternations += 1
self.state = RIGHT
elif (turnDir ==LEFT):
self.faceStill += 1
if (self.state == OTHER):
self.lefts += 1
if (self.lefts > FACE_LR_STATE_CHANGE_THRESH):
self.state = LEFT;
elif (self.state == RIGHT):
self.lefts += 1
if(self.lefts > FACE_LR_STATE_CHANGE_THRESH):
self.state = LEFT
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == LEFT):
self.lefts += 1
elif ((self.state ==UP) or (self.state == DOWN)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == RIGHT):
self.alternations += 1
self.state = LEFT
elif (turnDir == UP):
self.faceStill += 1
if (self.state == OTHER):
self.ups += 1
if (self.ups > FACE_UD_STATE_CHANGE_THRESH):
self.state = UP
elif (self.state == DOWN):
self.ups += 1
if (self.ups > FACE_UD_STATE_CHANGE_THRESH):
self.state = UP
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == UP):
self.ups += 1
elif ((self.state == LEFT) or (self.state == RIGHT)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == DOWN):
self.alternations += 1
self.state = UP
elif (turnDir == DOWN):
self.faceStill += 1
if (self.state == OTHER):
self.downs += 1
if (self.downs > FACE_UD_STATE_CHANGE_THRESH):
self.state = DOWN
elif (self.state == UP):
self.downs += 1
if (self.downs > FACE_UD_STATE_CHANGE_THRESH):
self.state = DOWN
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == DOWN):
self.downs += 1
elif ((self.state == LEFT) or (self.state == RIGHT)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == UP):
self.alternations += 1
self.state = DOWN
def resetCounts(self):
self.others = 0
self.stills = 0
self.rights = 0
self.lefts = 0
self.ups = 0
self.downs = 0
self.alternations = 0
def resetNonAltCounts(self):
self.others = 0
self.stills = 0
self.rights = 0
self.lefts = 0
self.ups = 0
self.downs = 0
def getTurnDir(self, old_xpt, new_xpt, old_ypt, new_ypt, old_width, new_width, old_height, new_height):
old_x = (int (old_xpt + (old_width/2)))
new_x = (int (new_xpt + (new_width/2)))
old_y = (int (old_ypt + (old_height/2)))
new_y = (int (new_ypt + (new_height/2)))
xdir = STILL
ydir = STILL
if (new_x - old_x > FACE_LR_MOVE_THRESH):
xdir = RIGHT
if (new_x - old_x < -FACE_LR_MOVE_THRESH):
xdir = LEFT
if (new_y - old_y > FACE_UD_MOVE_THRESH):
ydir = DOWN
if (new_y - old_y < -FACE_UD_MOVE_THRESH):
ydir = UP
if (ydir == xdir):
return STILL
else:
if ((ydir != STILL) and (xdir !=STILL)):
if ((abs(new_x - old_x)) > (abs(new_y - old_y)/2)):
return xdir
else:
if (((abs(new_y - old_y)) - (abs(new_x - old_x))) > FACE_ONE_DIMENSION_THRESH):
return ydir
else:
return OTHER;
else:
if (xdir == STILL):
return ydir
else:
return xdir
def isTooOld(self):
if (self.life > FACE_MAX_LIFE):
return True;
else:
return False;
def updateLife(self):
self.life = self.life+1;
return self.life;
########## end of Face Class #############
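# Illustrative check (not part of the original script): how getTurnDir() classifies a
# small horizontal move.  The coordinates are assumptions for demonstration only.
#   f = Face(0, 100, 100, 10, 10, 0, att)
#   f.getTurnDir(10, 20, 10, 10, 100, 100, 100, 100)
#   -> RIGHT  (the face centre moved +10px horizontally, exceeding
#              FACE_LR_MOVE_THRESH, while the vertical centre is unchanged)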
#### Detect faces ######################
def detect_and_draw(img ,cascade):
t = cv.GetTickCount() ## start counter
cv.CvtColor( img, gray, cv.CV_BGR2GRAY )
cv.Resize( gray, small_img, cv.CV_INTER_LINEAR )
#Ages all trackedFaces
for f in trackedFaces:
f.updateLife()
#f.printFace();
#Remove expired faces
for f in trackedFaces:
if (f.isTooOld()):
trackedFaces.remove(f)
faces = cv.HaarDetectObjects( small_img, cascade, storage, haar_scale, min_neighbors, haar_flags, min_size )
drawline = 0
if faces:
#found a face
for ((x, y, w, h), n) in faces:
matchedFace = False;
pt1 = ( int(x*image_scale), int(y*image_scale))
pt2 = ( int((x+w)*image_scale), int((y+h)*image_scale) )
pt3 = ( int(x*image_scale)+int(((x+w)*image_scale-x*image_scale)/3), int(y*image_scale))
pt4 = ( int((x+w)*image_scale)-int(((x+w)*image_scale-x*image_scale)/3), int((y*image_scale)+int(((y+h)*image_scale)-int(y*image_scale))/3) )
#check if there are trackedFaces
if (len(trackedFaces) > 0):
#each face being tracked
for f in trackedFaces:
#the face is found (small movement) RIGHT ON THE MONEY!!!
if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT) and (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
matchedFace = True;
f.updateFace(int(w*image_scale), int(h*image_scale), pt1[0], pt1[1], att);
mf = f;
break;
#if face not found, add a new face
if (matchedFace == False):
f = Face(0,int(w*image_scale), int(h*image_scale), pt1[0], pt1[1],0, att);
trackedFaces.append(f);
mf = f;
#No tracked faces: adding one
else:
f = Face(0,int (w*image_scale), int (h*image_scale), pt1[0], pt1[1],0, att);
trackedFaces.append(f);
mf = f;
#where to draw face and properties
if (mf.age > 5):
#draw attention line
lnpt1 = (int (mf.xpt*scale), int(mf.ypt*scale-5)-5)
if (mf.age > mf.width):
lnpt2 = (int (mf.xpt*scale+mf.width), int(mf.ypt*scale-5))
else:
lnpt2 = (int (mf.xpt*scale+mf.age), int(mf.ypt*scale-5))
cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8, 0) ## drawing bolded attention line
### draw eyes
cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3,8,0)
cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3,8,0)
#
### draw mouth
cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE, 3, 8, 0)
#
### draw face
cv.Rectangle( img, pt1, pt2, getColor(mf), 3, 8, 0 )
#cv.Rectangle( img, pt3, pt4, MAGENTA, 1, 8, 0 ) #forehead
drawline = mf.age
if(CAPTURING): saveAsJPG(img)
if (osName == "nt"): cv.Flip(img, img, 0)
cv.ShowImage ('Camera', img)
t = cv.GetTickCount() - t ## counter for FPS
#print "%i fps." % (cv.GetTickFrequency()*1000000./t) ## print FPS
global globt
globt= t;
#### end of Detect faces ######################
def getColor(mf):
if (mf.isNodding()): return GREEN
elif (mf.isShaking()): return RED
elif (mf.isStill()): return BLUE
else: return YELLOW
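# Illustrative sketch (not part of the original script): the colour-to-UDP-packet
# mapping used in the main loop below, factored into one helper.  The hex payloads
# and the default address/port are copied from the loop; nothing else is added.
COLOR_PACKETS = {
    'purple': '20bf55', 'blue': '20ff55', 'yellow': '207f55',
    'green': '206f55', 'orange': '208f55', 'red': '209f55',
}
def _send_color_packet(color, ipaddr='10.10.100.254', portnum=8899):
    payload = COLOR_PACKETS.get(color, '209f55').decode('hex')
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)  # UDP
    sock.connect((ipaddr, portnum))
    sock.send(payload)
    sock.close()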
######### main program ############
if __name__ == '__main__':
#create window and move to screen position
cv.NamedWindow ('Camera', cv.CV_WINDOW_AUTOSIZE)
if len (sys.argv) == 1:
# no argument on the command line, try to use the camera
capture = cv.CreateCameraCapture (0)
#
### check that capture device is OK
if not capture:
print "Error opening capture device"
sys.exit (1)
#
### capture the 1st frame to get some properties of it
frame = cv.QueryFrame (capture)
#
### get size of the frame
frame_size = cv.GetSize (frame)
gray = cv.CreateImage( frame_size, 8, 1 )
small_img = cv.CreateImage( ( int( frame_size[0]/image_scale),int( frame_size[1]/image_scale)), 8, 1 )
cascade = cv.Load( cascade_name)
#
while 1: # do forever
i += 1
# capture the current image
frame = cv.QueryFrame (capture)
if frame is None:
# no image captured... end the processing
break
#
### check OS
if (osName == "nt"):
cv.Flip(frame, frame, 0)
else:
cv.Flip(frame, None, 1)
#
### detecting faces here
detect_and_draw(frame, cascade)
ardout = ser.readline()
print ardout
if i%30 == 0:
if (age1>age2): m+=1
else: m-=1
agerate = (age1 - 60 - age5)/4
age5 = age4
age4 = age3
age3 = age2
age2 = age1
ageavg = (age1+age2+age3+age4+age5)/5
metric = ageavg + agerate
print str(age1) + '\t' + str(ageavg) + '\t' + str(agerate) + '\t' + str(metric) + '\t' + str(mood)
if (mood<0): cmet = "negative"
elif (mood>0): cmet = "positive"
# addressing information of target
IPADDR = '10.10.100.254'
PORTNUM = 8899
# enter the data content of the UDP packet as hex
if (age1 < arrive): color = "purple"
elif ((age1 > arrive) & (age1 <= early)):
if (cmet == "positive"):
color = "blue"
age1 = age1+30
elif (cmet== "negative"):
color = "red"
age1 = age1-10
else: color = "yellow"
elif ((age1 > early) & (age1 <= medium)):
if (cmet == "positive"):
color = "green"
age1 = age1+30
elif (cmet == "negative"):
color = "red"
age1 = age1-10
else: color = "blue"
elif ((age1 > medium) & (age1 <= late)):
if (cmet == "positive"):
color = "green"
age1 = age1+30
elif (cmet == "negative"):
color = "red"
age1 = age1-10
else: color = "green"
else: color = "purple"
if (color=="purple"):PACKETDATA='20bf55'.decode('hex')
elif (color=="blue"):PACKETDATA='20ff55'.decode('hex')
elif (color=="yellow"):PACKETDATA='207f55'.decode('hex')
elif (color=="green"):PACKETDATA='206f55'.decode('hex')
elif (color== "orange"):PACKETDATA='208f55'.decode('hex')
elif (color== "red"):PACKETDATA='209f55'.decode('hex')
else: PACKETDATA='209f55'.decode('hex')
# initialize a socket, think of it as a cable
# SOCK_DGRAM specifies that this is UDP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# connect the socket, think of it as connecting the cable to the address location
s.connect((IPADDR, PORTNUM))
# send the command
s.send(PACKETDATA)
# close the socket
s.close()
time.sleep(1.0)
### handle key events
k = cv.WaitKey (5)
if k % 0x100 == 27:
# user has pressed the ESC key, so exit
IPADDR = '10.10.100.254'
PORTNUM = 8899
PACKETDATA='210055'.decode('hex')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((IPADDR, PORTNUM))
s.send(PACKETDATA)
s.close()
cv.DestroyWindow('Camera');
break
|
|
from __future__ import unicode_literals
import os.path
import optparse
import sys
from .downloader.external import list_external_downloaders
from .compat import (
compat_expanduser,
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
compat_shlex_split,
)
from .utils import (
preferredencoding,
write_string,
)
from .version import __version__
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
res = []
for l in optionf:
res += compat_shlex_split(l, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return "".join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
def _hide_login_info(opts):
opts = list(opts)
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
try:
i = opts.index(private_opt)
opts[i + 1] = 'PRIVATE'
except ValueError:
pass
return opts
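# Illustrative check (not part of the original): _hide_login_info() masks the value
# that follows each credential flag, e.g.
#   _hide_login_info(['-u', 'alice', '-p', 'secret', '-f', 'best'])
#   -> ['-u', 'PRIVATE', '-p', 'PRIVATE', '-f', 'best']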
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'-v', '--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to (experimental)',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4 (experimental)',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6 (experimental)',
)
network.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some Chinese sites. '
'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading. (experimental)'
)
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (regex or caseless sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (regex or caseless sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='Download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter (experimental). '
'Specify any key (see help for -o for a list of available keys) to'
' match if the key is present, '
'!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, and '
'& to require multiple matches. '
'Values which are not known are excluded unless you'
' put a question mark (?) after the operator. '
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor auth code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, smotri, youku)')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats of specified videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected filesize (experimental)')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true',
help='Use the native HLS downloader instead of ffmpeg (experimental)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help='Number of seconds to sleep before each download.')
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See --output for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help='File containing URLs to download (\'-\' for stdin)')
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template. Use %(title)s to get the title, '
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
'%(autonumber)s to get an automatically incremented number, '
'%(ext)s for the filename extension, '
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
'%(format_id)s for the unique id of the format (like YouTube\'s itags: "137"), '
'%(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id, '
'%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist. '
'%(height)s and %(width)s for the width and height of the video format. '
'%(resolution)s for a textual description of the resolution of the video format. '
'%% for a literal percent. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help='[deprecated] Use title in file name (default)')
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help='[deprecated] Alias of --title')
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--write-playlists',
action='store_true', dest='writeplaylists', default=False,
help='Write playlists to .m3u files')
filesystem.add_option(
'--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail images')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mkv and mp4 videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output, '
'the parsed parameters replace existing values. '
'Additional templates: %(album)s, %(artist)s. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subtitles', '--convert-subs',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
if '--ignore-config' in command_line_conf:
system_conf = []
user_conf = []
else:
system_conf = compat_conf(_readOptions('/etc/youtube-dl.conf'))
if '--ignore-config' in system_conf:
user_conf = []
else:
user_conf = compat_conf(_readUserConf())
argv = system_conf + user_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
write_string('[debug] System config: ' + repr(_hide_login_info(system_conf)) + '\n')
write_string('[debug] User config: ' + repr(_hide_login_info(user_conf)) + '\n')
write_string('[debug] Command-line args: ' + repr(_hide_login_info(command_line_conf)) + '\n')
return parser, opts, args
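
# --- Editor-added sketch (not part of youtube-dl) ---------------------------
# A minimal, hedged illustration of how the option groups assembled above can
# be exercised through the overrideArguments branch (see the parse_args call
# above). It assumes the enclosing function is the module-level
# parseOpts(overrideArguments=None) of youtube_dl/options.py. The helper is
# defined but never called, so importing the module is unaffected.
def _example_parse_override_args():
    parser, opts, args = parseOpts(overrideArguments=[
        '-f', 'bestaudio', '-x', '--audio-format', 'mp3',
        'https://example.com/watch?v=XXXX'])
    # '-f' fills opts.format, '-x' sets opts.extractaudio and '--audio-format'
    # fills opts.audioformat; the remaining positional argument is the URL.
    assert opts.format == 'bestaudio'
    assert opts.extractaudio is True
    assert opts.audioformat == 'mp3'
    return parser, opts, args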
|
|
#!/usr/bin/env python
# Runtime (690, 130, 128, 128): 1.5 hours
from __future__ import print_function
from __future__ import absolute_import
import numpy
import h5py
import gc
from .cloud_objects import Cloudlet, Cluster
from .utility_functions import index_to_zyx, zyx_to_index
saveit = True
#--------------------------
def make_spatial_cloudlet_connections(cloudlets, MC):
    # Find all the cloudlets that have adjacent condensed or plume regions and
    # store the information in the cloudlet.adjacent dict. This works by
    # constructing flattened 3-d arrays holding the cloudlet id of every
    # condensed and plume point, then looking up each cloudlet's halo (edge)
    # points in those arrays to see which other cloudlets it borders.
condensed_array = -1*numpy.ones((MC['nz']*MC['ny']*MC['nx'],), numpy.int)
plume_array = -1*numpy.ones((MC['nz']*MC['ny']*MC['nx'],), numpy.int)
# label the cloud core and plume points using the list index of the
# cloudlet
for cloudlet in cloudlets:
condensed_array[cloudlet.condensed_mask()] = cloudlet.id
plume_array[cloudlet.plume_mask()] = cloudlet.id
for cloudlet in cloudlets:
# Find all cloudlets that have adjacent clouds
adjacent_condensed = condensed_array[cloudlet.condensed_halo()]
adjacent_condensed = adjacent_condensed[adjacent_condensed > -1]
if len(adjacent_condensed) > 0:
volumes = numpy.bincount(adjacent_condensed)
adjacent_condensed = numpy.unique(adjacent_condensed)
for id in adjacent_condensed:
cloudlet.adjacent['condensed'].append((volumes[id], cloudlets[id]))
cloudlet.adjacent['condensed'].sort()
cloudlet.adjacent['condensed'].reverse()
# Find all cloudlets that have adjacent plumes
adjacent_plumes = plume_array[cloudlet.plume_halo()]
adjacent_plumes = adjacent_plumes[adjacent_plumes > -1]
if len(adjacent_plumes) > 0:
volumes = numpy.bincount(adjacent_plumes)
adjacent_plumes = numpy.unique(adjacent_plumes)
for id in adjacent_plumes:
cloudlet.adjacent['plume'].append((volumes[id], cloudlets[id]))
cloudlet.adjacent['plume'].sort()
cloudlet.adjacent['plume'].reverse()
return cloudlets
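#
# Editor-added note: once make_spatial_cloudlet_connections() has run, each
# cloudlet carries, for example (values hypothetical):
#
#     cloudlet.adjacent['condensed'] == [(42, <Cloudlet 7>), (5, <Cloudlet 3>)]
#     cloudlet.adjacent['plume']     == [(90, <Cloudlet 7>)]
#
# i.e. lists of (number of bordering halo points, neighbouring Cloudlet),
# sorted so the most strongly connected neighbour comes first.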
#-----------------
def advect_indexes(indexes, u, v, w, MC):
K_J_I = index_to_zyx(indexes, MC)
K_J_I[0, :] = K_J_I[0, :] - w
K_J_I[1, :] = K_J_I[1, :] - v
K_J_I[2, :] = K_J_I[2, :] - u
K_J_I[0, K_J_I[0, :] >= MC['nz']] = MC['nz']-1
K_J_I[0, K_J_I[0, :] < 0] = 0
K_J_I[1, :] = K_J_I[1, :] % MC['ny']
K_J_I[2, :] = K_J_I[2, :] % MC['nx']
advected_indexes = zyx_to_index(K_J_I[0,:], K_J_I[1,:], K_J_I[2,:], MC)
return advected_indexes
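# Editor-added sketch: advect_indexes() shifts each flat index against the
# (u, v, w) displacement, clamping in z and wrapping periodically in y and x.
# For a hypothetical 4x4x4 grid (MC = {'nz': 4, 'ny': 4, 'nx': 4}), a point at
# (k, j, i) = (0, 0, 0) advected with u = v = w = 1 maps to (0, 3, 3): z is
# clamped at the bottom boundary while y and x wrap around.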
def count_overlaps(key, overlaps, cloudlet):
bin_count = numpy.bincount(overlaps)
indexes = numpy.arange(len(bin_count))
indexes = indexes[bin_count > 0]
bin_count = bin_count[bin_count > 0]
for n, index in enumerate(indexes):
cloudlet.overlap[key].append( (bin_count[n], index) )
def make_temporal_connections(cloudlets, old_clusters, MC):
# For each cloudlet, find the previous time's
# cluster that overlaps the cloudlet the most
condensed_array = -1*numpy.ones((MC['nz']*MC['ny']*MC['nx'],), numpy.int)
plume_array = -1*numpy.ones((MC['nz']*MC['ny']*MC['nx'],), numpy.int)
# label the cloud core and plume points using the list index of the
# cloud cluster
for id, cluster in old_clusters.iteritems():
condensed_array[cluster.condensed_mask()] = id
plume_array[cluster.plume_mask()] = id
for cloudlet in cloudlets:
# Find cloud-cloud overlaps
if cloudlet.has_condensed():
# Correct for cloud advection
advected_condensed_mask = advect_indexes(cloudlet.condensed_mask(),
cloudlet.u['condensed'],
cloudlet.v['condensed'],
cloudlet.w['condensed'],
MC)
# Get indexes of previous cores from the advected array
overlapping_condenseds = condensed_array[advected_condensed_mask]
overlapping_condenseds = overlapping_condenseds[overlapping_condenseds > -1]
if len(overlapping_condenseds) > 0:
count_overlaps('condensed->condensed', overlapping_condenseds, cloudlet)
# Find core-plume overlaps
overlapping_plumes = plume_array[advected_condensed_mask]
overlapping_plumes = overlapping_plumes[overlapping_plumes > -1]
if len(overlapping_plumes) > 0:
count_overlaps('plume->condensed', overlapping_plumes, cloudlet)
# Find plume-core overlaps
advected_plume_mask = advect_indexes(cloudlet.plume_mask(),
cloudlet.u['plume'],
cloudlet.v['plume'],
cloudlet.w['plume'],
MC)
overlapping_condenseds = condensed_array[advected_plume_mask]
overlapping_condenseds = overlapping_condenseds[overlapping_condenseds > -1]
if len(overlapping_condenseds) > 0:
count_overlaps('condensed->plume', overlapping_condenseds, cloudlet)
# Find plume-plume overlaps
overlapping_plumes = plume_array[advected_plume_mask]
overlapping_plumes = overlapping_plumes[overlapping_plumes > -1]
if len(overlapping_plumes) > 0:
count_overlaps('plume->plume', overlapping_plumes, cloudlet)
for item in cloudlet.overlap:
cloudlet.overlap[item].sort()
cloudlet.overlap[item].reverse()
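#
# Editor-added note: after make_temporal_connections() runs, cloudlet.overlap
# maps the directed keys 'condensed->condensed', 'plume->condensed',
# 'condensed->plume' and 'plume->plume' to lists of
# (number of overlapping points, previous-timestep cluster id) tuples,
# sorted so the largest overlap comes first.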
#---------------------
def create_new_clusters(cloudlets, clusters, max_id, MC):
core_list = []
condensed_list = []
plume_list = []
for cloudlet in cloudlets:
if cloudlet.has_core():
core_list.append(cloudlet)
elif cloudlet.has_condensed():
condensed_list.append(cloudlet)
else:
plume_list.append(cloudlet)
n = 0
# Make clusters out of the cloudlets with core points
while core_list:
cloudlet = core_list.pop()
cluster = Cluster(max_id, [cloudlet], MC)
cluster.events.append('NCOR')
        # Add cloudlets with adjacent condensed regions to the cluster.
        # Adding cloudlets may bring more cloudlets into contact with the
        # cluster, so we loop until no adjacent condensed cloudlets remain.
acondenseds = cluster.adjacent_cloudlets('condensed')
while acondenseds:
n = n + len(acondenseds)
for cloudlet in acondenseds:
                try:
                    core_list.remove( cloudlet )
                except ValueError:
                    # The adjacent cloudlet has condensed points but no core,
                    # so it is tracked in condensed_list rather than core_list.
                    condensed_list.remove( cloudlet )
cluster.add_cloudlets( acondenseds )
acondenseds = cluster.adjacent_cloudlets('condensed')
clusters[max_id] = cluster
max_id = max_id + 1
# Make clusters out of the cloudlets without core points
while condensed_list:
cloudlet = condensed_list.pop()
cluster = Cluster(max_id, [cloudlet], MC)
cluster.events.append('NCLD')
        if (len(cluster.adjacent_cloudlets('condensed')) > 0): print(" condensed connection ERROR")
        clusters[max_id] = cluster
        max_id = max_id + 1
    # Make clusters out of the cloudlets with only plume points
while plume_list:
cloudlet = plume_list.pop()
cluster = Cluster(max_id, [cloudlet], MC)
cluster.events.append('NP')
clusters[max_id] = cluster
max_id = max_id + 1
return clusters
#---------------------
def associate_cloudlets_with_previous_clusters(cloudlets, old_clusters, MC):
clusters = {}
new_cloudlets = []
for cloudlet in cloudlets:
back_conns = set()
max_conn = -1
if cloudlet.overlap['condensed->condensed']:
conns = cloudlet.overlap['condensed->condensed']
max_conn = conns[0][1]
conns = conns[1:]
for conn in conns:
back_conns.add(conn[1])
elif cloudlet.overlap['plume->condensed']:
conns = cloudlet.overlap['plume->condensed']
for conn in conns:
if not old_clusters[conn[1]].has_condensed():
if max_conn > -1:
back_conns.add(max_conn)
else:
max_conn = conn[1]
elif cloudlet.overlap['plume->plume']:
if not cloudlet.has_condensed():
conns = cloudlet.overlap['plume->plume']
for conn in conns:
if not old_clusters[conn[1]].has_condensed():
if max_conn > -1:
back_conns.add(max_conn)
else:
max_conn = conn[1]
# If there are back connections, add the cloudlet to
# a cluster
if max_conn > -1:
if max_conn in clusters:
clusters[max_conn].add_cloudlet(cloudlet)
else:
clusters[max_conn] = Cluster(max_conn, [cloudlet], MC)
clusters[max_conn].events.append('O%d' % max_conn)
clusters[max_conn].past_connections.add(max_conn)
for conn in back_conns:
clusters[max_conn].merge_connections.add(conn)
clusters[max_conn].events.append('M%d' % conn)
else:
new_cloudlets.append( cloudlet )
return new_cloudlets, clusters
#---
def check_for_adjacent_cloudlets(new_cloudlets, clusters):
n = 0
# Checks the clusters list to see if any of the cloudlets which did not
# overlap previous clusters are connected to the current clusters
for cluster in clusters.values():
condensed_connections = cluster.adjacent_cloudlets('condensed')
while condensed_connections:
n = n + 1
connected_cloudlet = condensed_connections.pop()
if connected_cloudlet in new_cloudlets:
cluster.add_cloudlet( connected_cloudlet )
new_cloudlets.remove( connected_cloudlet )
condensed_connections = cluster.adjacent_cloudlets('condensed')
#---
def split_clusters(clusters, max_id, MC):
for cluster in clusters.values():
groups = cluster.connected_cloudlet_groups()
if len(groups) > 1:
sizes = []
for group in groups:
size = 0
for cloudlet in group:
size = size + cloudlet.volume
sizes.append( (size, group) )
sizes.sort()
# Turn the smaller groups into new clusters
for size, group in sizes[:-1]:
cluster.remove_cloudlets(group)
new_cluster = Cluster(max_id, group, MC)
new_cluster.events.append('S%d' % cluster.id)
new_cluster.split_connections.add(cluster.id)
clusters[max_id] = new_cluster
max_id = max_id + 1
return max_id
#----------
def make_clusters(cloudlets, old_clusters, MC):
# make_clusters generates a dictionary of clusters
max_id = max(old_clusters.keys()) + 1
    # Find the spatial connections between cloudlets
make_spatial_cloudlet_connections(cloudlets, MC)
# associate cloudlets with previous timestep clusters
# cloudlets that can't be associated are assumed to be newly created
new_cloudlets, current_clusters = associate_cloudlets_with_previous_clusters(cloudlets, old_clusters, MC)
# See if any of the new cloudlets are touching a cluster
check_for_adjacent_cloudlets(new_cloudlets, current_clusters)
# See if the cloudlets in a cluster are no longer touching
max_id = split_clusters(current_clusters, max_id, MC)
# Create new clusters from any leftover new cloudlets
final_clusters = create_new_clusters(new_cloudlets, current_clusters, max_id, MC)
return final_clusters
#---------------------
def load_cloudlets(t, MC):
items = ['core', 'condensed', 'plume', 'u_condensed', 'v_condensed', \
'w_condensed', 'u_plume', 'v_plume', 'w_plume']
with h5py.File('hdf5/cloudlets_%08g.h5' % t, 'r') as cloudlets:
cloudlet = {}
result = []
n = 0
# TODO: Parallelize
for i in cloudlets:
if ((len(cloudlets[i]['plume']) > 7)
or (len(cloudlets[i]['condensed']) > 1)
or (len(cloudlets[i]['core']) > 0)):
# FIXME: The following loop takes a long time
for var in items:
cloudlet[var] = cloudlets[i][var][...]
result.append( Cloudlet( n, t, cloudlet, MC ) )
n = n + 1
return result
def save_clusters(clusters, t):
new_clusters = {}
with h5py.File('hdf5/clusters_%08g.h5' % t, "w") as f:
# TODO: Parallelize
for id, clust in clusters.iteritems():
grp = f.create_group(str(id))
dset = grp.create_dataset('past_connections', data=numpy.array(list(clust.past_connections)))
dset = grp.create_dataset('merge_connections', data=numpy.array(list(clust.merge_connections)))
dset = grp.create_dataset('split_connections', data=numpy.array(list(clust.split_connections)))
dset = grp.create_dataset('events', data=numpy.array(list(clust.events)))
dset = grp.create_dataset('core', data=clust.core_mask())
dset = grp.create_dataset('condensed', data=clust.condensed_mask())
dset = grp.create_dataset('plume', data=clust.plume_mask())
# NOTE: Ignore cluster_objects
#cPickle.dump(clusters, open('pkl/cluster_objects_%08g.pkl' % t, 'wb'))
# @profile is normally injected as a builtin by kernprof/line_profiler; provide
# a no-op fallback so the module still imports when run without the profiler.
try:
    profile
except NameError:
    def profile(func):
        return func
@profile
def cluster_cloudlets(MC):
print("cluster cloudlets; time step: 0")
cloudlets = load_cloudlets(0, MC)
make_spatial_cloudlet_connections( cloudlets, MC )
new_clusters = create_new_clusters(cloudlets, {}, 0, MC)
print("\t%d clusters" % len(new_clusters))
save_clusters(new_clusters, 0)
for t in range(1, MC['nt']):
print("cluster cloudlets; time step: %d" % t)
old_clusters = new_clusters
cloudlets = load_cloudlets(t, MC)
# Finds the ids of all the previous timestep's cloudlets that overlap
# the current timestep's cloudlets.
make_temporal_connections(cloudlets, old_clusters, MC)
# Uses the previous timestep overlap info to group
# current cloudlets into clusters.
new_clusters = make_clusters(cloudlets, old_clusters, MC)
print("\t%d clusters" % len(new_clusters))
save_clusters(new_clusters, t)
gc.collect()
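
# Editor-added note: cluster_cloudlets() expects MC to be a model-configuration
# dict supplying at least the grid and time dimensions used above, e.g.
# (hypothetical values) MC = {'nt': 690, 'nz': 130, 'ny': 128, 'nx': 128}.
# The main() referenced below is assumed to be defined elsewhere and to build
# such a dict before calling cluster_cloudlets(MC).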
if __name__ == "__main__":
main()
|
|
"""Support for the Xiaomi vacuum cleaner robot."""
import asyncio
from functools import partial
import logging
from miio import DeviceException, Vacuum # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA,
DOMAIN,
PLATFORM_SCHEMA,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumDevice,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Vacuum cleaner"
DATA_KEY = "vacuum.xiaomi_miio"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_MOVE_REMOTE_CONTROL = "xiaomi_remote_control_move"
SERVICE_MOVE_REMOTE_CONTROL_STEP = "xiaomi_remote_control_move_step"
SERVICE_START_REMOTE_CONTROL = "xiaomi_remote_control_start"
SERVICE_STOP_REMOTE_CONTROL = "xiaomi_remote_control_stop"
SERVICE_CLEAN_ZONE = "xiaomi_clean_zone"
FAN_SPEEDS = {"Quiet": 38, "Balanced": 60, "Turbo": 77, "Max": 90, "Gentle": 105}
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_DO_NOT_DISTURB = "do_not_disturb"
ATTR_DO_NOT_DISTURB_START = "do_not_disturb_start"
ATTR_DO_NOT_DISTURB_END = "do_not_disturb_end"
ATTR_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_FILTER_LEFT = "filter_left"
ATTR_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
ATTR_CLEANING_COUNT = "cleaning_count"
ATTR_CLEANED_TOTAL_AREA = "total_cleaned_area"
ATTR_CLEANING_TOTAL_TIME = "total_cleaning_time"
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
VACUUM_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids})
SERVICE_SCHEMA_REMOTE_CONTROL = VACUUM_SERVICE_SCHEMA.extend(
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
}
)
SERVICE_SCHEMA_CLEAN_ZONE = VACUUM_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[vol.Coerce(int), vol.Coerce(int), vol.Coerce(int), vol.Coerce(int)]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
}
)
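# Editor-added sketch: a hypothetical service-call payload that satisfies the
# schema above (coordinates are made up); each zone is [x1, y1, x2, y2] and
# "repeats" is clamped to the range 1..3:
#
#     {"entity_id": "vacuum.xiaomi_vacuum_cleaner",
#      "zone": [[25000, 25000, 27000, 27000]],
#      "repeats": 2}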
SERVICE_TO_METHOD = {
SERVICE_START_REMOTE_CONTROL: {"method": "async_remote_control_start"},
SERVICE_STOP_REMOTE_CONTROL: {"method": "async_remote_control_stop"},
SERVICE_MOVE_REMOTE_CONTROL: {
"method": "async_remote_control_move",
"schema": SERVICE_SCHEMA_REMOTE_CONTROL,
},
SERVICE_MOVE_REMOTE_CONTROL_STEP: {
"method": "async_remote_control_move_step",
"schema": SERVICE_SCHEMA_REMOTE_CONTROL,
},
SERVICE_CLEAN_ZONE: {
"method": "async_clean_zone",
"schema": SERVICE_SCHEMA_CLEAN_ZONE,
},
}
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
2: STATE_IDLE,
3: STATE_IDLE,
5: STATE_CLEANING,
6: STATE_RETURNING,
7: STATE_CLEANING,
8: STATE_DOCKED,
9: STATE_ERROR,
10: STATE_PAUSED,
11: STATE_CLEANING,
12: STATE_ERROR,
15: STATE_RETURNING,
16: STATE_CLEANING,
17: STATE_CLEANING,
18: STATE_CLEANING,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Xiaomi vacuum cleaner robot platform."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
vacuum = Vacuum(host, token)
mirobo = MiroboVacuum(name, vacuum)
hass.data[DATA_KEY][host] = mirobo
async_add_entities([mirobo], update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on MiroboVacuum."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_vacuums = [
vac
for vac in hass.data[DATA_KEY].values()
if vac.entity_id in entity_ids
]
else:
target_vacuums = hass.data[DATA_KEY].values()
update_tasks = []
for vacuum in target_vacuums:
await getattr(vacuum, method["method"])(**params)
for vacuum in target_vacuums:
update_coro = vacuum.async_update_ha_state(True)
update_tasks.append(update_coro)
if update_tasks:
await asyncio.wait(update_tasks)
for vacuum_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[vacuum_service].get("schema", VACUUM_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, vacuum_service, async_service_handler, schema=schema
)
class MiroboVacuum(StateVacuumDevice):
"""Representation of a Xiaomi Vacuum cleaner robot."""
def __init__(self, name, vacuum):
"""Initialize the Xiaomi vacuum cleaner robot handler."""
self._name = name
self._vacuum = vacuum
self.vacuum_state = None
self._available = False
self.consumable_state = None
self.clean_history = None
self.dnd_state = None
self.last_clean = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the status of the vacuum cleaner."""
if self.vacuum_state is not None:
# The vacuum reverts back to an idle state after erroring out.
# We want to keep returning an error until it has been cleared.
if self.vacuum_state.got_error:
return STATE_ERROR
try:
return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
except KeyError:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.vacuum_state.state,
self.vacuum_state.state_code,
)
return None
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.vacuum_state is not None:
return self.vacuum_state.battery
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
if self.vacuum_state is not None:
speed = self.vacuum_state.fanspeed
if speed in FAN_SPEEDS.values():
return [key for key, value in FAN_SPEEDS.items() if value == speed][0]
return speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(sorted(FAN_SPEEDS.keys(), key=lambda s: FAN_SPEEDS[s]))
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
attrs = {}
if self.vacuum_state is not None:
attrs.update(
{
ATTR_DO_NOT_DISTURB: STATE_ON
if self.dnd_state.enabled
else STATE_OFF,
ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
# Not working --> 'Cleaning mode':
# STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
ATTR_CLEANING_TIME: int(
self.vacuum_state.clean_time.total_seconds() / 60
),
ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
ATTR_CLEANING_COUNT: int(self.clean_history.count),
ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
ATTR_CLEANING_TOTAL_TIME: int(
self.clean_history.total_duration.total_seconds() / 60
),
ATTR_MAIN_BRUSH_LEFT: int(
self.consumable_state.main_brush_left.total_seconds() / 3600
),
ATTR_SIDE_BRUSH_LEFT: int(
self.consumable_state.side_brush_left.total_seconds() / 3600
),
ATTR_FILTER_LEFT: int(
self.consumable_state.filter_left.total_seconds() / 3600
),
ATTR_SENSOR_DIRTY_LEFT: int(
self.consumable_state.sensor_dirty_left.total_seconds() / 3600
),
ATTR_STATUS: str(self.vacuum_state.state),
}
)
if self.last_clean:
attrs[ATTR_CLEAN_START] = self.last_clean.start
attrs[ATTR_CLEAN_STOP] = self.last_clean.end
if self.vacuum_state.got_error:
attrs[ATTR_ERROR] = self.vacuum_state.error
return attrs
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a vacuum command handling error messages."""
try:
await self.hass.async_add_executor_job(partial(func, *args, **kwargs))
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
"""Start or resume the cleaning task."""
await self._try_command(
"Unable to start the vacuum: %s", self._vacuum.resume_or_start
)
async def async_pause(self):
"""Pause the cleaning task."""
await self._try_command("Unable to set start/pause: %s", self._vacuum.pause)
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self._try_command("Unable to stop: %s", self._vacuum.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed.capitalize() in FAN_SPEEDS:
fan_speed = FAN_SPEEDS[fan_speed.capitalize()]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). " "Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._vacuum.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
await self._try_command("Unable to return home: %s", self._vacuum.home)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._vacuum.spot
)
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner."""
await self._try_command("Unable to locate the botvac: %s", self._vacuum.find)
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
await self._try_command(
"Unable to send command to the vacuum: %s",
self._vacuum.raw_command,
command,
params,
)
async def async_remote_control_start(self):
"""Start remote control mode."""
await self._try_command(
"Unable to start remote control the vacuum: %s", self._vacuum.manual_start
)
async def async_remote_control_stop(self):
"""Stop remote control mode."""
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._vacuum.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
"""Move vacuum with remote control mode."""
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._vacuum.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
"""Move vacuum one step with remote control mode."""
await self._try_command(
"Unable to remote control the vacuum: %s",
self._vacuum.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
def update(self):
"""Fetch state from the device."""
try:
state = self._vacuum.status()
self.vacuum_state = state
self.consumable_state = self._vacuum.consumable_status()
self.clean_history = self._vacuum.clean_history()
self.last_clean = self._vacuum.last_clean_details()
self.dnd_state = self._vacuum.dnd_status()
self._available = True
except OSError as exc:
_LOGGER.error("Got OSError while fetching the state: %s", exc)
except DeviceException as exc:
_LOGGER.warning("Got exception while fetching the state: %s", exc)
async def async_clean_zone(self, zone, repeats=1):
"""Clean selected area for the number of repeats indicated."""
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.hass.async_add_executor_job(self._vacuum.zoned_clean, zone)
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
import mock
from mock import call
import requests
import six
import yaml
from mistralclient.api.base import APIException
from mistralclient.api.v2 import action_executions
from mistralclient.api.v2 import executions
from mistralclient.api.v2 import workbooks
from mistralclient.api.v2 import workflows
from oslo_config import cfg
# XXX: actionsensor import depends on config being set up.
import st2tests.config as tests_config
tests_config.parse_args()
# Set defaults for retry options.
cfg.CONF.set_override('retry_exp_msec', 100, group='mistral')
cfg.CONF.set_override('retry_exp_max_msec', 200, group='mistral')
cfg.CONF.set_override('retry_stop_max_msec', 200, group='mistral')
import st2common.bootstrap.runnersregistrar as runners_registrar
from st2actions.handlers.mistral import MistralCallbackHandler
from st2actions.handlers.mistral import STATUS_MAP as mistral_status_map
from st2actions.runners.localrunner import LocalShellRunner
from st2actions.runners.mistral.v2 import MistralRunner
from st2common.constants import action as action_constants
from st2common.models.api.action import ActionAPI
from st2common.models.api.notification import NotificationsHelper
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.services import action as action_service
from st2common.transport.liveaction import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
from tests.unit.base import MockLiveActionPublisher
TEST_FIXTURES = {
'workflows': [
'workbook_v2.yaml',
'workbook_v2_many_workflows.yaml',
'workbook_v2_many_workflows_no_default.yaml',
'workflow_v2.yaml',
'workflow_v2_many_workflows.yaml'
],
'actions': [
'workbook_v2.yaml',
'workbook_v2_many_workflows.yaml',
'workbook_v2_many_workflows_no_default.yaml',
'workflow_v2.yaml',
'workflow_v2_many_workflows.yaml',
'workbook_v2_name_mismatch.yaml',
'workflow_v2_name_mismatch.yaml',
'local.yaml'
]
}
PACK = 'generic'
LOADER = FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
MISTRAL_EXECUTION = {'id': str(uuid.uuid4()), 'state': 'RUNNING', 'workflow_name': None}
# Workbook with a single workflow
WB1_YAML_FILE_NAME = TEST_FIXTURES['workflows'][0]
WB1_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB1_YAML_FILE_NAME)
WB1_SPEC = FIXTURES['workflows'][WB1_YAML_FILE_NAME]
WB1_YAML = yaml.safe_dump(WB1_SPEC, default_flow_style=False)
WB1_NAME = '%s.%s' % (PACK, WB1_YAML_FILE_NAME.replace('.yaml', ''))
WB1 = workbooks.Workbook(None, {'name': WB1_NAME, 'definition': WB1_YAML})
WB1_OLD = workbooks.Workbook(None, {'name': WB1_NAME, 'definition': ''})
WB1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB1_EXEC['workflow_name'] = WB1_NAME
# Workbook with many workflows
WB2_YAML_FILE_NAME = TEST_FIXTURES['workflows'][1]
WB2_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB2_YAML_FILE_NAME)
WB2_SPEC = FIXTURES['workflows'][WB2_YAML_FILE_NAME]
WB2_YAML = yaml.safe_dump(WB2_SPEC, default_flow_style=False)
WB2_NAME = '%s.%s' % (PACK, WB2_YAML_FILE_NAME.replace('.yaml', ''))
WB2 = workbooks.Workbook(None, {'name': WB2_NAME, 'definition': WB2_YAML})
WB2_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB2_EXEC['workflow_name'] = WB2_NAME
# Workbook with many workflows but no default workflow is defined
WB3_YAML_FILE_NAME = TEST_FIXTURES['workflows'][2]
WB3_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB3_YAML_FILE_NAME)
WB3_SPEC = FIXTURES['workflows'][WB3_YAML_FILE_NAME]
WB3_YAML = yaml.safe_dump(WB3_SPEC, default_flow_style=False)
WB3_NAME = '%s.%s' % (PACK, WB3_YAML_FILE_NAME.replace('.yaml', ''))
WB3 = workbooks.Workbook(None, {'name': WB3_NAME, 'definition': WB3_YAML})
WB3_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB3_EXEC['workflow_name'] = WB3_NAME
# Non-workbook with a single workflow
WF1_YAML_FILE_NAME = TEST_FIXTURES['workflows'][3]
WF1_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF1_YAML_FILE_NAME)
WF1_SPEC = FIXTURES['workflows'][WF1_YAML_FILE_NAME]
WF1_YAML = yaml.safe_dump(WF1_SPEC, default_flow_style=False)
WF1_NAME = '%s.%s' % (PACK, WF1_YAML_FILE_NAME.replace('.yaml', ''))
WF1 = workflows.Workflow(None, {'name': WF1_NAME, 'definition': WF1_YAML})
WF1_OLD = workflows.Workflow(None, {'name': WF1_NAME, 'definition': ''})
WF1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WF1_EXEC['workflow_name'] = WF1_NAME
WF1_EXEC_PAUSED = copy.deepcopy(WF1_EXEC)
WF1_EXEC_PAUSED['state'] = 'PAUSED'
# Non-workbook with many workflows
WF2_YAML_FILE_NAME = TEST_FIXTURES['workflows'][4]
WF2_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF2_YAML_FILE_NAME)
WF2_SPEC = FIXTURES['workflows'][WF2_YAML_FILE_NAME]
WF2_YAML = yaml.safe_dump(WF2_SPEC, default_flow_style=False)
WF2_NAME = '%s.%s' % (PACK, WF2_YAML_FILE_NAME.replace('.yaml', ''))
WF2 = workflows.Workflow(None, {'name': WF2_NAME, 'definition': WF2_YAML})
WF2_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WF2_EXEC['workflow_name'] = WF2_NAME
# Action executions requirements
ACTION_PARAMS = {'friend': 'Rocky'}
NON_EMPTY_RESULT = 'non-empty'
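# Editor-added note: with PACK = 'generic', the fixture files above resolve to
# action refs such as 'generic.workflow_v2' and 'generic.workbook_v2', and each
# WB*/WF*_EXEC is a deep copy of MISTRAL_EXECUTION carrying its own
# workflow_name, so the mocked Mistral responses below stay independent.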
@mock.patch.object(CUDPublisher, 'publish_update', mock.MagicMock(return_value=None))
@mock.patch.object(CUDPublisher, 'publish_create',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_create))
@mock.patch.object(LiveActionPublisher, 'publish_state',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_state))
class MistralRunnerTest(DbTestCase):
@classmethod
def setUpClass(cls):
super(MistralRunnerTest, cls).setUpClass()
runners_registrar.register_runner_types()
for _, fixture in six.iteritems(FIXTURES['actions']):
instance = ActionAPI(**fixture)
Action.add_or_update(ActionAPI.to_model(instance))
def setUp(self):
super(MistralRunnerTest, self).setUp()
cfg.CONF.set_override('api_url', 'http://0.0.0.0:9101', group='auth')
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'st2_execution_id': str(execution.id),
'st2_liveaction_id': str(liveaction.id),
'st2_action_api_url': 'http://0.0.0.0:9101/v1',
'__actions': {
'st2.action': {
'st2_context': {
'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
'parent': {
'execution_id': str(execution.id)
},
'notify': {},
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_with_st2_https(self):
cfg.CONF.set_override('api_url', 'https://0.0.0.0:9101', group='auth')
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'st2_execution_id': str(execution.id),
'st2_liveaction_id': str(liveaction.id),
'st2_action_api_url': 'https://0.0.0.0:9101/v1',
'__actions': {
'st2.action': {
'st2_context': {
'endpoint': 'https://0.0.0.0:9101/v1/actionexecutions',
'parent': {
'execution_id': str(execution.id)
},
'notify': {},
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_with_notifications(self):
notify_data = {'on_complete': {'routes': ['slack'],
'message': '"@channel: Action succeeded."', 'data': {}}}
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, notify=notify_data)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'st2_execution_id': str(execution.id),
'st2_liveaction_id': str(liveaction.id),
'st2_action_api_url': 'http://0.0.0.0:9101/v1',
'__actions': {
'st2.action': {
'st2_context': {
'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
'parent': {
'execution_id': str(execution.id)
},
'notify': NotificationsHelper.from_model(liveaction.notify),
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(side_effect=requests.exceptions.ConnectionError('Connection refused')))
def test_launch_workflow_mistral_offline(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Connection refused', liveaction.result['error'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(side_effect=[requests.exceptions.ConnectionError(), []]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_mistral_retry(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(side_effect=[APIException(error_message='Duplicate entry.'), WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_duplicate_error(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1_OLD))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
workflows.WorkflowManager, 'update',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_when_workflow_definition_changed(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workbooks.WorkbookManager, 'delete',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_when_workflow_not_exists(self):
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF2))
def test_launch_workflow_with_many_workflows(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF2_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF2_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Multiple workflows is not supported.', liveaction.result['error'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(side_effect=Exception()))
    def test_launch_workflow_name_mismatch(self):
action_ref = 'generic.workflow_v2_name_mismatch'
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=action_ref, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Name of the workflow must be the same', liveaction.result['error'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_workbook(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB2_EXEC)))
def test_launch_workbook_with_many_workflows(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB2_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WB2_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB2_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB2_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB3_EXEC)))
def test_launch_workbook_with_many_workflows_no_default(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB3_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WB3_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Default workflow cannot be determined.', liveaction.result['error'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB1_OLD))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_when_workbook_definition_changed(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workflows.WorkflowManager, 'delete',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_when_workbook_not_exists(self):
liveaction = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(side_effect=Exception()))
def test_launch_workbook_name_mismatch(self):
action_ref = 'generic.workbook_v2_name_mismatch'
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=action_ref, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Name of the workbook must be the same', liveaction.result['error'])
def test_callback_handler_status_map(self):
        # Ensure all StackStorm statuses are mapped, otherwise unmapped ones lead to zombie workflows.
self.assertListEqual(sorted(mistral_status_map.keys()),
sorted(action_constants.LIVEACTION_STATUSES))
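    # Note on the mapping checked above: as the callback tests below exercise (see
    # test_callback_retry), the succeeded liveaction status corresponds to the Mistral
    # state 'SUCCESS'; the assertion guards against adding a StackStorm status without
    # a matching Mistral state.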
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_handler_with_result_as_text(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
'<html></html>')
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_handler_with_result_as_dict(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, {'a': 1})
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_handler_with_result_as_json_str(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, '{"a": 1}')
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, "{'a': 1}")
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_handler_with_result_as_list(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
["a", "b", "c"])
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_handler_with_result_as_list_str(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
'["a", "b", "c"]')
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback(self):
liveaction = LiveActionDB(
action='core.local', parameters={'cmd': 'uname -a'},
callback={
'source': 'mistral',
'url': 'http://localhost:8989/v2/action_executions/12345'
}
)
for status in action_constants.LIVEACTION_COMPLETED_STATES:
expected_mistral_status = mistral_status_map[status]
LocalShellRunner.run = mock.Mock(return_value=(status, NON_EMPTY_RESULT, None))
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, status)
action_executions.ActionExecutionManager.update.assert_called_with(
'12345', state=expected_mistral_status, output=NON_EMPTY_RESULT)
@mock.patch.object(
LocalShellRunner, 'run',
mock.MagicMock(return_value=(action_constants.LIVEACTION_STATUS_RUNNING,
NON_EMPTY_RESULT, None)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_callback_incomplete_state(self):
liveaction = LiveActionDB(
action='core.local', parameters={'cmd': 'uname -a'},
callback={
'source': 'mistral',
'url': 'http://localhost:8989/v2/action_executions/12345'
}
)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
self.assertFalse(action_executions.ActionExecutionManager.update.called)
@mock.patch.object(
LocalShellRunner, 'run',
mock.MagicMock(return_value=(action_constants.LIVEACTION_STATUS_SUCCEEDED,
NON_EMPTY_RESULT, None)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(side_effect=[
requests.exceptions.ConnectionError(),
None]))
def test_callback_retry(self):
liveaction = LiveActionDB(
action='core.local', parameters={'cmd': 'uname -a'},
callback={
'source': 'mistral',
'url': 'http://localhost:8989/v2/action_executions/12345'
}
)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
calls = [call('12345', state='SUCCESS', output=NON_EMPTY_RESULT) for i in range(0, 2)]
action_executions.ActionExecutionManager.update.assert_has_calls(calls)
@mock.patch.object(
LocalShellRunner, 'run',
mock.MagicMock(return_value=(action_constants.LIVEACTION_STATUS_SUCCEEDED,
NON_EMPTY_RESULT, None)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(side_effect=[
requests.exceptions.ConnectionError(),
requests.exceptions.ConnectionError(),
requests.exceptions.ConnectionError(),
requests.exceptions.ConnectionError(),
None]))
def test_callback_retry_exhausted(self):
liveaction = LiveActionDB(
action='core.local', parameters={'cmd': 'uname -a'},
callback={
'source': 'mistral',
'url': 'http://localhost:8989/v2/action_executions/12345'
}
)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # This test initially sets up the mock for action_executions.ActionExecutionManager.update
        # to fail the first 4 times and return success on the 5th attempt. The max attempts
        # is set to 3, so we expect only 3 calls to pass through the update method.
calls = [call('12345', state='SUCCESS', output=NON_EMPTY_RESULT) for i in range(0, 2)]
action_executions.ActionExecutionManager.update.assert_has_calls(calls)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
executions.ExecutionManager, 'update',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC_PAUSED)))
def test_cancel(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
requester = cfg.CONF.system_user.user
liveaction, execution = action_service.request_cancellation(liveaction, requester)
executions.ExecutionManager.update.assert_called_with(WF1_EXEC.get('id'), 'PAUSED')
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELED)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
executions.ExecutionManager, 'update',
mock.MagicMock(side_effect=[requests.exceptions.ConnectionError(),
executions.Execution(None, WF1_EXEC_PAUSED)]))
def test_cancel_retry(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
requester = cfg.CONF.system_user.user
liveaction, execution = action_service.request_cancellation(liveaction, requester)
executions.ExecutionManager.update.assert_called_with(WF1_EXEC.get('id'), 'PAUSED')
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELED)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
executions.ExecutionManager, 'update',
mock.MagicMock(side_effect=requests.exceptions.ConnectionError('Connection refused')))
def test_cancel_retry_exhausted(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, execution = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
requester = cfg.CONF.system_user.user
liveaction, execution = action_service.request_cancellation(liveaction, requester)
calls = [call(WF1_EXEC.get('id'), 'PAUSED') for i in range(0, 2)]
executions.ExecutionManager.update.assert_has_calls(calls)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELING)
def test_build_context(self):
parent = {
'mistral': {
'workflow_name': 'foo',
'workflow_execution_id': 'b222b934-7473-4cd4-a2ec-e204a8c93848',
'task_tags': None,
'task_name': 'some_fancy_wf_task',
'task_id': '6c7d4334-3e7d-49c6-918d-698e846affaf',
'action_execution_id': '24da5c88-834c-4a65-8b56-4ddbd654eb68'
}
}
current = {
'workflow_name': 'foo.subwf',
'workflow_execution_id': '135e3446-4c89-4afe-821f-6ec6a0849b27'
}
context = MistralRunner._build_mistral_context(parent, current)
self.assertTrue(context is not None)
self.assertTrue('parent' in context['mistral'].keys())
parent_dict = {
'workflow_name': parent['mistral']['workflow_name'],
'workflow_execution_id': parent['mistral']['workflow_execution_id']
}
self.assertDictEqual(context['mistral']['parent'], parent_dict)
self.assertEqual(context['mistral']['workflow_execution_id'],
current['workflow_execution_id'])
parent = None
context = MistralRunner._build_mistral_context(parent, current)
self.assertDictEqual(context['mistral'], current)
from __future__ import print_function
import threading
import unittest
import mock
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def make_array(start, shape, dtype):
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return a
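# For example, make_array(1, (2, 3), numpy.float32) yields the float32 array
# [[1., 2., 3.], [4., 5., 6.]]: consecutive values starting at `start`, reshaped
# to `shape`.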
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32)
y2 = make_array(2, y_shape, numpy.float32)
gx1 = chainer.Variable(
make_array(1, x_shape, numpy.float32))
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32)
gy2 = make_array(1, y_shape, numpy.float32)
f = chainer.FunctionNode()
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
f.backward = mock.MagicMock(return_value=(gx1, gx2))
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32)
self.x2 = make_array(0, x_shape, numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gx1_orig = chainer.Variable(
make_array(3, x_shape, numpy.float32))
self.gx2_orig = chainer.Variable(
make_array(2, x_shape, numpy.float32))
self.gx1_accum = gx1 + self.gx1_orig
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self):
self.x1 = cuda.to_gpu(self.x1)
self.x2 = cuda.to_gpu(self.x2)
self.y1 = cuda.to_gpu(self.y1)
self.y2 = cuda.to_gpu(self.y2)
self.gx1.to_gpu()
self.gx1_orig.to_gpu()
self.gx2_orig.to_gpu()
self.gx1_accum.to_gpu()
self.gy1 = cuda.to_gpu(self.gy1)
self.gy2 = cuda.to_gpu(self.gy2)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f.backward = mock.MagicMock(return_value=(self.gx1, self.gx2))
def check_forward(self, gpu):
y1, y2 = self.f.forward((self.x1, self.x2))
self.assertEqual(self.f.check_type_forward.call_count, 0)
self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
self._get_method('forward', gpu).assert_called_once_with(
(self.x1, self.x2))
self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
def test_forward_cpu(self):
self.check_forward(False)
@attr.gpu
def test_forward_gpu(self):
self.setup_gpu()
self.check_forward(True)
def check_backward_accumulate(self, gxs):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
self.f.inputs = (x1.node, x2.node)
gx1, gx2 = self.f.backward_accumulate(
(0, 1), (self.gy1, self.gy2), gxs)
if gxs[0] is None:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1.data))
self.assertIsNone(gx2)
else:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1_accum.data))
numpy.testing.assert_array_equal(cuda.to_cpu(gx2.data),
cuda.to_cpu(self.gx2_orig.data))
def test_backward_accumulate_none_cpu(self):
self.check_backward_accumulate((None, None))
@attr.gpu
def test_backward_accumulate_none_gpu(self):
self.setup_gpu()
self.check_backward_accumulate((None, None))
def test_backward_accumulate_cpu(self):
self.check_backward_accumulate((self.gx1_orig, self.gx2_orig))
@attr.gpu
def test_backward_accumulate_gpu(self):
self.setup_gpu()
self.check_backward_accumulate((self.gx1_orig, self.gx2_orig))
def check_check_type_forward(self):
self.assertEqual(self.f.check_type_forward.call_count, 1)
ts = self.f.check_type_forward.call_args[0][0]
self.assertIsInstance(ts, type_check.LightTypeInfoTuple)
self.assertEqual(len(ts), 2)
t1 = ts[0]
assert t1.shape == self.x_shape
assert t1.dtype == numpy.float32
t2 = ts[1]
assert t2.shape == self.x_shape
assert t2.dtype == numpy.int32
def check_apply(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
x1._node._rank = 1
x2._node._rank = 3
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 4)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def test_apply_cpu(self):
self.check_apply()
@attr.gpu
def test_apply_gpu(self):
self.setup_gpu()
self.check_apply()
def check_apply_all_ndarray(self):
x1 = self.x1
x2 = self.x2
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
xp = cuda.get_array_module(x1)
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
self.assertFalse(y.requires_grad)
def test_apply_all_ndarray_cpu(self):
self.check_apply_all_ndarray()
@attr.gpu
def test_apply_all_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_all_ndarray()
def check_apply_ndarray(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
x1._node._rank = 1
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 2)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def test_apply_ndarray_cpu(self):
self.check_apply_ndarray()
@attr.gpu
def test_apply_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_ndarray()
def check_apply_single_return_value(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
ret, = self.f.apply((x1, x2))
self.assertIsInstance(ret, chainer.Variable)
def test_apply_single_return_value_cpu(self):
self.f.forward_cpu.return_value = (cuda.to_cpu(self.y1),)
self.check_apply_single_return_value()
@attr.gpu
def test_apply_single_return_value_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (cuda.to_gpu(self.y1),)
self.check_apply_single_return_value()
def _get_f(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
y1, y2 = self.f.apply((x1, x2))
f = y1.creator_node
        # To test weak reference, return only x1 and y1.
# x2 and y2 are deleted by the garbage collector
return f, x1, y1
def test_unchain(self):
f, _x1, _y1 = self._get_f()
y1, y2 = f.outputs
f.unchain()
# As _y1 is alive, this weak ref is also alive
y1_ref = y1()
self.assertIsNotNone(y1_ref)
self.assertIsNone(y1_ref.creator)
        # This weak ref is dead because of unchain
y2_ref = y2()
self.assertIsNone(y2_ref)
self.assertIsNone(f.inputs)
def test_label(self):
self.assertEqual(self.f.label, 'FunctionNode')
class TestFunctionNodeInvalidType(unittest.TestCase):
def test_forward_invalid1(self):
class FunctionNode(chainer.FunctionNode):
def check_type_forward(self, in_types):
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
)
def forward(self, inputs):
return inputs
f = FunctionNode()
# OK
v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
result, = f.apply((v,))
assert isinstance(result, chainer.Variable)
# Incorrect dtype
        # in py3, numpy dtypes are represented as classes
msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""
v = chainer.Variable(numpy.random.randn(1, 5))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f.apply((v,))
# Incorrect dim
msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""
v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f.apply((v,))
class TestFunctionNodeInconsistentBackends(unittest.TestCase):
def setUp(self):
self.x1 = numpy.random.rand(2, 3).astype(numpy.float32)
self.x2 = numpy.random.rand(2, 3).astype(numpy.float32)
@attr.gpu
def test_inconsistent_inputs(self):
class FunctionNode(chainer.FunctionNode):
def forward(self, inputs):
return inputs
f = FunctionNode()
# Cause inconsistency between inputs
x1 = cuda.to_gpu(self.x1)
x1 = chainer.Variable(x1)
x2 = chainer.Variable(self.x2)
with self.assertRaises(ValueError):
f.apply((x1, x2))
@attr.gpu
def test_inconsistent_outputs(self):
class FunctionNode(chainer.FunctionNode):
def forward(self, inputs):
# Cause inconsistency between outputs
return inputs[0], cuda.to_gpu(inputs[1])
f = FunctionNode()
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
with self.assertRaises(ValueError):
f.apply((x1, x2))
@testing.parameterize(
{'return_value': (numpy.array([float('nan')], numpy.float32),),
'valid': False},
{'return_value': (numpy.array([1], numpy.int32),), 'valid': True},
)
class TestFunctionNodeForwardDebug(unittest.TestCase):
def setUp(self):
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
self.one = numpy.array([1], numpy.float32)
self.f = chainer.FunctionNode()
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_debug_forward(self, x_data):
x = chainer.Variable(x_data)
if self.valid:
# check if forward throws nothing
self.f.apply((x,))
else:
with self.assertRaises(RuntimeError):
self.f.apply((x,))
def test_debug_forward_cpu(self):
self.f.forward_cpu = mock.MagicMock(return_value=self.return_value)
self.check_debug_forward(self.one)
@attr.gpu
def test_debug_forward_gpu(self):
return_value = tuple(None if x is None else cuda.to_gpu(x)
for x in self.return_value)
self.f.forward_gpu = mock.MagicMock(return_value=return_value)
self.check_debug_forward(cuda.to_gpu(self.one))
@testing.parameterize(
{'return_data': (numpy.array(float('nan'), numpy.float32),),
'valid': False},
{'return_data': (None,), 'valid': True},
)
class TestFunctionNodeBackwardDebug(unittest.TestCase):
def setUp(self):
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
self.one = numpy.array(1, numpy.float32)
self.f = chainer.FunctionNode()
self.return_value = tuple(None if x is None else chainer.Variable(x)
for x in self.return_data)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_debug_backward_accumulate(self, *xs_data):
xs = [chainer.Variable(x) for x in xs_data]
y, = self.f.apply(xs)
if self.valid:
            # check if backward throws nothing
y.backward()
else:
with self.assertRaises(RuntimeError):
y.backward()
def test_debug_backward_accumulate_cpu(self):
self.f.forward_cpu = mock.MagicMock(return_value=(self.one,))
self.f.backward = mock.MagicMock(return_value=self.return_value)
input_value = (self.one,) * len(self.return_value)
self.check_debug_backward_accumulate(*input_value)
@attr.gpu
def test_debug_backward_accumulate_gpu(self):
self.f.forward_gpu = mock.MagicMock(
return_value=(cuda.to_gpu(self.one),))
for x in self.return_value:
if x is not None:
x.to_gpu()
input_value = (cuda.to_gpu(self.one),) * len(self.return_value)
self.f.backward = mock.MagicMock(return_value=self.return_value)
self.check_debug_backward_accumulate(*input_value)
class TestNoBackpropMode(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.array([1.], 'f'))
def test_no_backprop_mode(self):
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.no_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
def test_force_backprop_mode(self):
with chainer.no_backprop_mode():
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
class MyThread(threading.Thread):
def run(self):
x = chainer.Variable(numpy.array([1], dtype='f'))
with chainer.no_backprop_mode():
y = x + 1
self.creator_is_none = y.creator_node is None
class TestBackpropModeMultiThread(unittest.TestCase):
def test_multi_thread(self):
t = MyThread()
t.start()
t.join()
self.assertTrue(t.creator_is_none)
class FunctionNodeWithRetaining(chainer.FunctionNode):
def forward(self, inputs):
self.retain_inputs([1])
self.retain_outputs([1])
return inputs
def backward(self, _, grad_outputs):
self.backward_inputs = self.get_retained_inputs()
self.backward_outputs = self.get_retained_outputs()
return grad_outputs
class TestFunctionNodeRetaining(unittest.TestCase):
def setUp(self):
inputs = [chainer.Variable(numpy.array([1], dtype=numpy.float32)),
chainer.Variable(numpy.array([1], dtype=numpy.float32))]
self.input_data = [x.data for x in inputs]
self.input_nodes = [x.node for x in inputs]
self.f1 = FunctionNodeWithRetaining()
outputs = self.f1.apply(inputs)
outputs[0].grad = numpy.array([1], dtype=numpy.float32)
outputs[0].backward()
self.f1_output_data = [y.data for y in outputs]
self.f1_output_nodes = [y.node for y in outputs]
inputs = None # release non-retained inputs
def test_retain_inputs(self):
self.assertEqual(len(self.f1.backward_inputs), 1)
self.assertIs(self.f1.backward_inputs[0].node, self.input_nodes[1])
numpy.testing.assert_array_equal(self.f1.backward_inputs[0].data,
self.input_data[1])
def test_retain_outputs_f1(self):
self.assertEqual(len(self.f1.backward_outputs), 1)
numpy.testing.assert_array_equal(self.f1.backward_outputs[0].data,
self.f1_output_data[1])
def _get_value(x):
if isinstance(x, chainer.Variable):
return x.data
return x
class TestGradTypeCheck(unittest.TestCase):
def test_type_check(self):
x = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
y = x * x
gx = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
gy = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
chainer.grad([y], [x], [gx], [gy])
chainer.grad((y,), (x,), (gx,), (gy,))
with self.assertRaises(TypeError):
chainer.grad(y, [x], [gx], [gy])
with self.assertRaises(TypeError):
chainer.grad([y], x, [gx], [gy])
with self.assertRaises(TypeError):
chainer.grad([y], [x], gx, [gy])
with self.assertRaises(TypeError):
chainer.grad([y], [x], [gx], gy)
class GradTestBase(object):
shape = 3,
x_names = ()
y_names = ()
loss_scale = None
def _init_attrs(self, names):
ret = []
for name in names:
v = chainer.Variable(
numpy.random.randint(-4, 6, self.shape).astype('f'), name=name)
ret.append(v)
setattr(self, name, v)
return ret
def _init_ones(self, names):
ret = []
for name in names:
v = chainer.Variable(numpy.ones(self.shape, dtype='f'))
ret.append(v)
setattr(self, name, v)
return ret
@staticmethod
def _get_value(x):
if isinstance(x, chainer.Variable):
return x.data
return x
@staticmethod
def _to_grad_names(names):
return ['g%s' % name for name in names]
def setUp(self):
self.xs = self._init_attrs(self.x_names)
self.gxs = self._init_attrs(self._to_grad_names(self.x_names))
self.gys = self._init_attrs(self._to_grad_names(self.y_names))
if self.loss_scale is not None:
self._init_ones(self._to_grad_names(self.y_names))
self.gys = None
def use_gpu(self):
for value in six.itervalues(self.__dict__):
if isinstance(value, chainer.Variable):
value.to_gpu()
def forward(self):
raise NotImplementedError
def expected_grad(self):
raise NotImplementedError
def expected_double_grad(self):
raise NotImplementedError
def _print_variables(self, name, vs):
print('{}: '.format(name), end='')
print(*(self._get_value(v) for v in vs), sep=', ')
def _print_inputs(self):
self._print_variables('xs ', self.xs)
self._print_variables('gxs ', self.gxs)
self._print_variables('gys ', self.gys)
def check_grad(self):
self.forward()
ys = [getattr(self, name) for name in self.y_names]
gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
loss_scale=self.loss_scale)
expected = self.expected_grad()
for i, gx in enumerate(self.gxs):
expected[i] += gx
self.assertEqual(len(gxs), len(expected))
try:
for a, e in zip(gxs, expected):
testing.assert_allclose(self._get_value(a), self._get_value(e))
except Exception:
self._print_inputs()
self._print_variables('gxs (actual) ', gxs)
self._print_variables('gxs (expected)', expected)
raise
def test_grad_cpu(self):
self.check_grad()
@attr.gpu
def test_grad_gpu(self):
self.use_gpu()
self.check_grad()
def check_double_grad(self):
self.forward()
ys = [getattr(self, name) for name in self.y_names]
gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
enable_double_backprop=True,
loss_scale=self.loss_scale)
y = sum(gxs)
ggxs = chainer.grad([y], self.xs)
expected = self.expected_double_grad()
self.assertEqual(len(ggxs), len(expected))
try:
for a, e in zip(ggxs, expected):
testing.assert_allclose(self._get_value(a), self._get_value(e))
except Exception:
self._print_inputs()
self._print_variables('gxs ', gxs)
self._print_variables('ggxs (actual) ', ggxs)
self._print_variables('ggxs (expected)', expected)
raise
def test_double_grad_cpu(self):
self.check_double_grad()
@attr.gpu
def test_double_grad_gpu(self):
self.use_gpu()
self.check_double_grad()
@testing.parameterize(*testing.product({
'loss_scale': [None, 1, 10],
}))
class TestGradSimple(GradTestBase, unittest.TestCase):
x_names = 'x',
y_names = 'y',
def forward(self):
self.y = self.x * self.x
def expected_grad(self):
grad = 2 * self.x * self.gy
if self.loss_scale is not None:
grad *= self.loss_scale
return [grad]
def expected_double_grad(self):
ggrad = 2 * self.gy
if self.loss_scale is not None:
ggrad *= self.loss_scale
return [ggrad]
class TestGradComplex(GradTestBase, unittest.TestCase):
x_names = 'x1', 'x2'
y_names = 'y1', 'y2'
def forward(self):
self.z = self.x1 * self.x1
self.y1 = self.z + self.x1 * self.x2 + self.x2
self.y2 = self.z + self.y1
def expected_grad(self):
dz_dx = 2 * self.x1
dy1_dx = self.gy1 + self.gy2
return [dy1_dx * (dz_dx + self.x2) + self.gy2 * dz_dx,
dy1_dx * (self.x1 + 1)]
def expected_double_grad(self):
dy1_dx = self.gy1 + self.gy2
return [3 * dy1_dx + 2 * self.gy2, dy1_dx]
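    # A quick sketch of where the expectations above come from: with z = x1**2,
    # y1 = z + x1*x2 + x2 and y2 = z + y1, the chain rule gives
    #   gx1 = gy1*(2*x1 + x2) + gy2*(4*x1 + x2) = dy1_dx*(dz_dx + x2) + gy2*dz_dx
    #   gx2 = (gy1 + gy2)*(x1 + 1)              = dy1_dx*(x1 + 1)
    # and differentiating gx1 + gx2 once more with respect to (x1, x2) yields
    # (3*dy1_dx + 2*gy2, dy1_dx), which is what expected_double_grad returns.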
testing.run_module(__name__, __file__)
from __future__ import unicode_literals
import django.forms.models as model_forms
import json
import six
from django import forms
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import ModelFormMixin, UpdateView, CreateView, ProcessFormView, FormView
from django.views.generic.base import TemplateView
from django.views.generic import DetailView, ListView
from guardian.shortcuts import get_objects_for_user, assign_perm
from guardian.utils import get_anonymous_user
from smartmin.csv_imports.models import ImportTask
from . import widgets
def smart_url(url, obj=None):
"""
URLs that start with @ are reversed, using the passed in arguments.
Otherwise a straight % substitution is applied.
"""
if url.find("@") >= 0:
(args, value) = url.split('@')
if args:
val = getattr(obj, args, None)
return reverse(value, args=[val])
else:
return reverse(value)
else:
if obj is None:
return url
else:
return url % obj.id
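# A sketch of the three supported formats (the URL names and object here are
# hypothetical, shown only to illustrate the branches above):
#
#     smart_url('id@users.user_read', obj)   # reverse('users.user_read', args=[obj.id])
#     smart_url('@users.user_list')          # reverse('users.user_list')
#     smart_url('/users/read/%d/', obj)      # '/users/read/<obj.id>/'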
class SmartView(object):
fields = None
exclude = None
field_config = {}
title = None
refresh = 0
template_name = None
pjax = None
# set by our CRUDL
url_name = None
# if we are part of a CRUDL, we keep a reference to it here, set by CRUDL
crudl = None
def __init__(self, *args):
"""
        There are a few variables we want to maintain in the instance, not the
class.
"""
self.extra_context = {}
super(SmartView, self).__init__()
def derive_title(self):
"""
Returns the title used on this page.
"""
return self.title
@classmethod
def derive_url_pattern(cls, path, action):
"""
Returns the URL pattern for this view.
"""
return r'^%s/%s/$' % (path, action)
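    # For example, derive_url_pattern('users', 'create') returns r'^users/create/$'.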
def has_permission(self, request, *args, **kwargs):
"""
Figures out if the current user has permissions for this view.
"""
self.kwargs = kwargs
self.args = args
self.request = request
if not getattr(self, 'permission', None):
return True
else:
# first check our anonymous permissions
real_anon = get_anonymous_user()
has_perm = real_anon.has_perm(self.permission)
# if not, then check our real permissions
if not has_perm:
has_perm = request.user.has_perm(self.permission)
# if not, perhaps we have it per object
if not has_perm:
has_perm = self.has_object_permission('get_object')
# if still no luck, check if we have permissions on the parent object
if not has_perm:
has_perm = self.has_object_permission('get_parent_object')
return has_perm
def has_object_permission(self, getter_name):
"""
Checks for object level permission for an arbitrary getter
"""
obj = None
obj_getter = getattr(self, getter_name, None)
# get object requires pk
if getter_name == "get_object" and 'pk' not in self.kwargs:
return False
if obj_getter:
obj = obj_getter()
if obj:
return self.request.user.has_perm(getattr(self, 'permission', None), obj)
def dispatch(self, request, *args, **kwargs):
"""
Overloaded to check permissions if appropriate
"""
def wrapper(request, *args, **kwargs):
if not self.has_permission(request, *args, **kwargs):
path = urlquote(request.get_full_path())
login_url = kwargs.pop('login_url', settings.LOGIN_URL)
redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path))
else:
response = self.pre_process(request, *args, **kwargs)
if not response:
return super(SmartView, self).dispatch(request, *args, **kwargs)
else:
return response
return wrapper(request, *args, **kwargs)
def pre_process(self, request, *args, **kwargs):
"""
Gives the view an opportunity to intercept this response and return a different
        response instead. This can be used, for example, to check some preconditions and to
        redirect the user somewhere else if they are not met.
Views which wish to use this should return a Response object.
"""
return None
def lookup_obj_attribute(self, obj, field):
"""
Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible
"""
curr_field = field.encode('ascii', 'ignore').decode("utf-8")
rest = None
if field.find('.') >= 0:
curr_field = field.split('.')[0]
rest = '.'.join(field.split('.')[1:])
# next up is the object itself
obj_field = getattr(obj, curr_field, None)
# if it is callable, do so
if obj_field and getattr(obj_field, '__call__', None):
obj_field = obj_field()
if obj_field and rest:
return self.lookup_obj_attribute(obj_field, rest)
else:
return obj_field
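    # For example, lookup_obj_attribute(obj, 'created_by.get_full_name') first reads
    # obj.created_by, then resolves get_full_name on that object and, because it is
    # callable, invokes it (the attribute names here are illustrative only).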
def lookup_field_value(self, context, obj, field):
"""
Looks up the field value for the passed in object and field name.
Note that this method is actually called from a template, but this provides a hook
for subclasses to modify behavior if they wish to do so.
This may be used for example to change the display value of a variable depending on
other variables within our context.
"""
curr_field = field.encode('ascii', 'ignore').decode("utf-8")
# if this isn't a subfield, check the view to see if it has a get_ method
if field.find('.') == -1:
            # view supersedes all, does it have a 'get_' method for this obj
view_method = getattr(self, 'get_%s' % curr_field, None)
if view_method:
return view_method(obj)
return self.lookup_obj_attribute(obj, field)
def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
Our heuristic is as follows:
1) we check to see if our field_config has a label specified
        2) if not, then we derive a field label from the field name
"""
# if this is a subfield, strip off everything but the last field name
if field.find('.') >= 0:
return self.lookup_field_label(context, field.split('.')[-1], default)
label = None
# is there a label specified for this field
if field in self.field_config and 'label' in self.field_config[field]:
label = self.field_config[field]['label']
# if we were given a default, use that
elif default:
label = default
# check our model
else:
for model_field in self.model._meta.fields:
if model_field.name == field:
return model_field.verbose_name.title()
# otherwise, derive it from our field name
if label is None:
label = self.derive_field_label(field)
return label
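    # As a sketch of the heuristic above: a field_config entry such as
    # {'name': {'label': 'Full name'}} wins, then any passed-in default, then the
    # model field's verbose_name, and finally derive_field_label(), which just
    # title-cases the name, e.g. 'first_name' -> 'First Name'.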
def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
"""
help = None
        # is there help specified for this field
if field in self.field_config and 'help' in self.field_config[field]:
help = self.field_config[field]['help']
# if we were given a default, use that
elif default:
help = default
# try to see if there is a description on our model
elif hasattr(self, 'model'):
for model_field in self.model._meta.fields:
if model_field.name == field:
help = model_field.help_text
break
return help
def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
# if we were given a default, use that
elif default:
css = default
return css
def derive_field_label(self, field, obj=None):
"""
Derives a field label for the passed in field name.
"""
# replace _'s with ' '
label = field.replace('_', ' ').title()
return label
def derive_field_config(self):
"""
Derives the field config for this instance. By default we just use
self.field_config
"""
return self.field_config
def get_template_names(self):
"""
Returns the name of the template to use to render this request.
        Smartmin provides default templates as fallbacks, so it appends its own template names to the end
of whatever list is built by the generic views.
Subclasses can override this by setting a 'template_name' variable on the class.
"""
templates = []
if getattr(self, 'template_name', None):
templates.append(self.template_name)
if getattr(self, 'default_template', None):
templates.append(self.default_template)
else:
templates = super(SmartView, self).get_template_names()
return templates
def derive_fields(self):
"""
Default implementation
"""
fields = []
if self.fields:
fields.append(self.fields)
return fields
def derive_exclude(self):
"""
Returns which fields we should exclude
"""
exclude = []
if self.exclude:
exclude += self.exclude
return exclude
def derive_refresh(self):
"""
Returns how many milliseconds before we should refresh
"""
return self.refresh
def get_context_data(self, **kwargs):
"""
We supplement the normal context data by adding our fields and labels.
"""
context = super(SmartView, self).get_context_data(**kwargs)
# derive our field config
self.field_config = self.derive_field_config()
# add our fields
self.fields = self.derive_fields()
# build up our current parameter string, EXCLUSIVE of our page. These
# are used to build pagination URLs
url_params = "?"
order_params = ""
for key in self.request.REQUEST.keys():
if key != 'page' and key != 'pjax' and key[0] != '_':
for value in self.request.REQUEST.getlist(key):
url_params += "%s=%s&" % (key, urlquote(value))
elif key == '_order':
order_params = "&".join(["%s=%s" % (key, _) for _ in self.request.REQUEST.getlist(key)])
context['url_params'] = url_params
context['order_params'] = order_params + "&"
context['pjax'] = self.pjax
# set our blocks
context['blocks'] = dict()
# stuff it all in our context
context['fields'] = self.fields
context['view'] = self
context['field_config'] = self.field_config
context['title'] = self.derive_title()
# and any extra context the user specified
context.update(self.extra_context)
# by default, our base is 'base.html', but we might be pjax
base_template = "base.html"
if 'pjax' in self.request.REQUEST or 'pjax' in self.request.POST:
base_template = "smartmin/pjax.html"
if 'HTTP_X_PJAX' in self.request.META:
base_template = "smartmin/pjax.html"
context['base_template'] = base_template
# set our refresh if we have one
refresh = self.derive_refresh()
if refresh:
context['refresh'] = refresh
return context
def as_json(self, context):
"""
        Responsible for turning our context into a dict that can then be serialized into a
        JSON response.
"""
return context
def render_to_response(self, context, **response_kwargs):
"""
Overloaded to deal with _format arguments.
"""
# should we actually render in json?
if '_format' in self.request.REQUEST and self.request.REQUEST['_format'] == 'json':
json_data = self.as_json(context)
return HttpResponse(json.dumps(json_data), content_type='application/javascript')
# otherwise, return normally
else:
return super(SmartView, self).render_to_response(context)
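    # In practice this means any smart view answers ?_format=json with the dict
    # returned by as_json(context) serialized as JSON, and renders the normal
    # template otherwise.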
class SmartTemplateView(SmartView, TemplateView):
pass
class SmartReadView(SmartView, DetailView):
default_template = 'smartmin/read.html'
edit_button = None
field_config = {'modified_blurb': dict(label="Modified"),
'created_blurb': dict(label="Created")}
def derive_queryset(self):
return super(SmartReadView, self).get_queryset()
def get_queryset(self):
self.queryset = self.derive_queryset()
return self.queryset
def derive_title(self):
"""
By default we just return the string representation of our object
"""
return str(self.object)
@classmethod
def derive_url_pattern(cls, path, action):
"""
Returns the URL pattern for this view.
"""
return r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
def derive_fields(self):
"""
Derives our fields. We first default to using our 'fields' variable if available,
otherwise we figure it out from our object.
"""
if self.fields:
return list(self.fields)
else:
fields = []
for field in self.object._meta.fields:
fields.append(field.name)
# only exclude? then remove those items there
exclude = self.derive_exclude()
# remove any excluded fields
fields = [field for field in fields if field not in exclude]
return fields
def get_modified_blurb(self, obj):
return "%s by %s" % (obj.modified_on.strftime("%B %d, %Y at %I:%M %p"), obj.modified_by)
def get_created_blurb(self, obj):
return "%s by %s" % (obj.created_on.strftime("%B %d, %Y at %I:%M %p"), obj.created_by)
class SmartDeleteView(SmartView, DetailView, ProcessFormView):
default_template = 'smartmin/delete_confirm.html'
name_field = 'name'
cancel_url = None
redirect_url = None
@classmethod
def derive_url_pattern(cls, path, action):
"""
Returns the URL pattern for this view.
"""
return r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
def get_cancel_url(self):
if not self.cancel_url:
raise ImproperlyConfigured("DeleteView must define a cancel_url")
return smart_url(self.cancel_url, self.object)
def pre_delete(self, obj):
# auto populate modified_by if it is present
if hasattr(obj, 'modified_by_id') and self.request.user.id >= 0:
obj.modified_by = self.request.user
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.pre_delete(self.object)
redirect_url = self.get_redirect_url()
self.object.delete()
return HttpResponseRedirect(redirect_url)
def get_redirect_url(self, **kwargs):
if not self.redirect_url:
raise ImproperlyConfigured("DeleteView must define a redirect_url")
return smart_url(self.redirect_url)
def get_context_data(self, **kwargs):
""" Add in the field to use for the name field """
context = super(SmartDeleteView, self).get_context_data(**kwargs)
context['name_field'] = self.name_field
context['cancel_url'] = self.get_cancel_url()
return context
class SmartListView(SmartView, ListView):
default_template = 'smartmin/list.html'
link_url = None
link_fields = None
add_button = None
search_fields = None
paginate_by = 25
field_config = { 'is_active': dict(label=''), }
default_order = None
select_related = None
list_permission = None
@classmethod
def derive_url_pattern(cls, path, action):
if action == 'list':
return r'^%s/$' % (path)
else:
return r'^%s/%s/$' % (path, action)
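    # For example, derive_url_pattern('users', 'list') returns r'^users/$', while
    # derive_url_pattern('users', 'csv') returns r'^users/csv/$'.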
def derive_search_fields(self):
"""
Derives our search fields, by default just returning what was set
"""
return self.search_fields
def derive_title(self):
"""
Derives our title from our list
"""
title = super(SmartListView, self).derive_title()
if not title:
return force_text(self.model._meta.verbose_name_plural).title()
else:
return title
def derive_link_fields(self, context):
"""
Used to derive which fields should be linked. This should return a set() containing
the names of those fields which should be linkable.
"""
        if self.link_fields is not None:
return self.link_fields
else:
link_fields = set()
if self.fields:
for field in self.fields:
if field != 'is_active':
link_fields.add(field)
break
return link_fields
def lookup_field_link(self, context, field, obj):
"""
By default we just return /view/{{ id }}/ for the current object.
"""
return smart_url(self.link_url, obj)
def lookup_field_orderable(self, field):
"""
        Returns whether the passed in field is sortable or not. By default all 'raw' fields, that
        is, fields that are part of the model, are sortable.
"""
try:
self.model._meta.get_field_by_name(field)
return True
except:
# that field doesn't exist, so not sortable
return False
def get_context_data(self, **kwargs):
"""
Add in what fields are linkable
"""
context = super(SmartListView, self).get_context_data(**kwargs)
# our linkable fields
self.link_fields = self.derive_link_fields(context)
# stuff it all in our context
context['link_fields'] = self.link_fields
# our search term if any
if 'search' in self.request.REQUEST:
context['search'] = self.request.REQUEST['search']
# our ordering field if any
order = self.derive_ordering()
if order:
if order[0] == '-':
context['order'] = order[1:]
context['order_asc'] = False
else:
context['order'] = order
context['order_asc'] = True
return context
def derive_select_related(self):
return self.select_related
def derive_queryset(self, **kwargs):
"""
Derives our queryset.
"""
# get our parent queryset
queryset = super(SmartListView, self).get_queryset(**kwargs)
# apply any filtering
search_fields = self.derive_search_fields()
if search_fields and 'search' in self.request.REQUEST:
terms = self.request.REQUEST['search'].split()
query = Q(pk__gt=0)
for term in terms:
term_query = Q(pk__lt=0)
for field in search_fields:
term_query |= Q(**{ field: term })
query &= term_query
queryset = queryset.filter(query)
# add any select related
related = self.derive_select_related()
if related:
queryset = queryset.select_related(*related)
# return our queryset
return queryset
def get_queryset(self, **kwargs):
"""
Gets our queryset. This takes care of filtering if there are any
fields to filter by.
"""
queryset = self.derive_queryset(**kwargs)
# if our list should be filtered by a permission as well, do so
if self.list_permission:
# only filter if this user doesn't have a global permission
if not self.request.user.has_perm(self.list_permission):
user = self.request.user
# guardian only behaves with model users
if settings.ANONYMOUS_USER_ID and user.is_anonymous():
user = get_user_model().objects.get(pk=settings.ANONYMOUS_USER_ID)
queryset = queryset.filter(id__in=get_objects_for_user(user, self.list_permission))
return self.order_queryset(queryset)
def derive_ordering(self):
"""
Returns what field should be used for ordering (using a prepended '-' to indicate descending sort).
If the default order of the queryset should be used, returns None
"""
if '_order' in self.request.REQUEST:
return self.request.REQUEST['_order']
elif self.default_order:
return self.default_order
else:
return None
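    # For example, a request with ?_order=name makes this return 'name', while a
    # class-level default_order = '-created_on' (returned when the request has no
    # _order) asks for a descending sort on created_on.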
def order_queryset(self, queryset):
"""
Orders the passed in queryset, returning a new queryset in response. By default uses the _order query
parameter.
"""
order = self.derive_ordering()
# if we get our order from the request
# make sure it is a valid field in the list
if '_order' in self.request.REQUEST:
if order not in self.derive_fields():
order = None
if order:
# if our order is a single string, convert to a simple list
if isinstance(order, six.string_types):
order = (order,)
queryset = queryset.order_by(*order)
return queryset
def derive_fields(self):
"""
Derives our fields.
"""
if self.fields:
return self.fields
else:
fields = []
for field in self.object_list.model._meta.fields:
if field.name != 'id':
fields.append(field.name)
return fields
def get_is_active(self, obj):
"""
Default implementation of get_is_active which returns a simple div so as to
render a green dot for active items and nothing for inactive ones.
Users of SmartModel will get this rendering for free.
"""
if obj.is_active:
return '<div class="active_icon"></div>'
else:
return ''
def render_to_response(self, context, **response_kwargs):
"""
Overloaded to deal with _format arguments.
"""
# is this a select2 format response?
if self.request.REQUEST.get('_format', 'html') == 'select2':
results = []
for obj in context['object_list']:
result = None
if hasattr(obj, 'as_select2'):
result = obj.as_select2()
if not result:
result = dict(id=obj.pk, text="%s" % obj)
results.append(result)
json_data = dict(results=results, err='nil', more=context['page_obj'].has_next())
return HttpResponse(json.dumps(json_data), content_type='application/javascript')
# otherwise, return normally
else:
return super(SmartListView, self).render_to_response(context)
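# Illustrative example (the URL and data below are hypothetical, not taken from
# this module): for a list view mounted at /country/list/, a request such as
#
#     GET /country/list/?_format=select2&search=uga
#
# is answered by the render_to_response() override above with JSON shaped like
#
#     {"results": [{"id": 1, "text": "Uganda"}], "err": "nil", "more": false}
#
# which is what a select2 widget expects for remote data; any other _format
# value falls through to the normal HTML rendering.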
class SmartCsvView(SmartListView):
def derive_filename(self):
filename = getattr(self, 'filename', None)
if not filename:
filename = "%s.csv" % self.model._meta.verbose_name.lower()
return filename
def render_to_response(self, context, **response_kwargs):
import csv
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename()
writer = csv.writer(response, quoting=csv.QUOTE_ALL)
fields = self.derive_fields()
# build up our header row
header = []
for field in fields:
header.append(six.text_type(self.lookup_field_label(dict(), field)))
writer.writerow([s.encode("utf-8") for s in header])
# then our actual values
for obj in self.object_list:
row = []
for field in fields:
row.append(six.text_type(self.lookup_field_value(dict(), obj, field)))
writer.writerow([s.encode("utf-8") for s in row])
return response
class SmartXlsView(SmartListView):
def derive_filename(self):
filename = getattr(self, 'filename', None)
if not filename:
filename = "%s.xls" % self.model._meta.verbose_name.lower()
return filename
def render_to_response(self, context, **response_kwargs):
from xlwt import Workbook
book = Workbook()
sheet1 = book.add_sheet(self.derive_title())
fields = self.derive_fields()
# build up our header row
for col in range(len(fields)):
field = fields[col]
sheet1.write(0, col, six.text_type(self.lookup_field_label(dict(), field)))
# then our actual values
for row in range(len(self.object_list)):
obj = self.object_list[row]
for col in range(len(fields)):
field = fields[col]
value = six.text_type(self.lookup_field_value(dict(), obj, field))
# skip the header
sheet1.write(row + 1, col, value)
# Create the HttpResponse object with the appropriate header.
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename()
book.save(response)
return response
class SmartFormMixin(object):
readonly = ()
field_config = {'modified_blurb': dict(label="Modified"),
'created_blurb': dict(label="Created")}
success_message = None
submit_button_name = _("Submit")
def derive_title(self):
"""
Derives our title from our object
"""
if not self.title:
return _("Form")
else:
return self.title
def derive_success_message(self):
"""
Returns a message to display when this form is successfully saved
"""
return self.success_message
def get_form(self, form_class=None):
"""
Returns an instance of the form to be used in this view.
"""
self.form = super(SmartFormMixin, self).get_form(form_class)
fields = list(self.derive_fields())
# apply our field filtering on our form class
exclude = self.derive_exclude()
exclude += self.derive_readonly()
# remove any excluded fields
for field in exclude:
if field in self.form.fields:
del self.form.fields[field]
if fields is not None:
            # filter out our form fields (iterate over a copy since we delete as we go)
            for name, field in list(self.form.fields.items()):
                if name not in fields:
                    del self.form.fields[name]
# stuff in our referer as the default location for where to return
location = forms.CharField(widget=forms.widgets.HiddenInput(), required=False)
if ('HTTP_REFERER' in self.request.META):
location.initial = self.request.META['HTTP_REFERER']
# add the location to our form fields
self.form.fields['loc'] = location
if fields:
fields.append('loc')
# provides a hook to programmatically customize fields before rendering
for (name, field) in self.form.fields.items():
field = self.customize_form_field(name, field)
self.form.fields[name] = field
return self.form
def customize_form_field(self, name, field):
"""
        Allows views to customize their form fields. By default, Smartmin replaces the plain textbox
        date input with its own DatePicker implementation.
"""
if isinstance(field, forms.fields.DateField) and isinstance(field.widget, forms.widgets.DateInput):
field.widget = widgets.DatePickerWidget()
if isinstance(field, forms.fields.ImageField) and isinstance(field.widget, forms.widgets.ClearableFileInput):
field.widget = widgets.ImageThumbnailWidget()
return field
def lookup_field_label(self, context, field, default=None):
"""
        Figures out what the field label should be for the passed in field name.
        We overload this so as to use our form to see if there is a label set there. If so,
        we pass that label as the default instead of having our parent derive
        one from the field name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default)
def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
This is overloaded so that we can check whether our form has help text set
        explicitly. If so, we will pass this as the default to our parent function.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.help_text
break
return super(SmartFormMixin, self).lookup_field_help(field, default=default)
def derive_readonly(self):
"""
        Figures out what fields should be readonly. We iterate over our field_config to find all
        entries that have readonly set to True.
"""
readonly = list(self.readonly)
for key, value in self.field_config.items():
if 'readonly' in value and value['readonly']:
readonly.append(key)
return readonly
def derive_fields(self):
"""
Derives our fields.
"""
if self.fields is not None:
fields = list(self.fields)
else:
form = self.form
fields = []
for field in form:
fields.append(field.name)
# this is slightly confusing but we add in readonly fields here because they will still
# need to be displayed
readonly = self.derive_readonly()
if readonly:
fields += readonly
# remove any excluded fields
for exclude in self.derive_exclude():
if exclude in fields:
fields.remove(exclude)
return fields
def get_form_class(self):
"""
Returns the form class to use in this view
"""
form_class = None
if self.form_class:
form_class = self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
# run time parameters when building our form
factory_kwargs = self.get_factory_kwargs()
form_class = model_forms.modelform_factory(model, **factory_kwargs)
return form_class
def get_factory_kwargs(self):
"""
        Lets us specify any extra parameters we might want to pass to our form factory.
These can include: 'form', 'fields', 'exclude' or 'formfield_callback'
"""
params = dict()
exclude = self.derive_exclude()
exclude += self.derive_readonly()
if self.fields:
fields = list(self.fields)
for ex in exclude:
if ex in fields:
fields.remove(ex)
params['fields'] = fields
if exclude:
params['exclude'] = exclude
return params
def get_success_url(self):
"""
By default we use the referer that was stuffed in our
form when it was created
"""
if self.success_url:
# if our smart url references an object, pass that in
if self.success_url.find('@') > 0:
return smart_url(self.success_url, self.object)
else:
return smart_url(self.success_url, None)
elif 'loc' in self.form.cleaned_data:
return self.form.cleaned_data['loc']
raise ImproperlyConfigured("No redirect location found, override get_success_url to not use redirect urls")
def derive_initial(self):
"""
Returns what initial dict should be passed to our form. By default this is empty.
"""
return dict()
def get_form_kwargs(self):
"""
        We override this to inject the initial data returned by derive_initial()
        into the keyword arguments used to construct the form.
"""
kwargs = super(SmartFormMixin, self).get_form_kwargs()
kwargs['initial'] = self.derive_initial()
return kwargs
def derive_submit_button_name(self):
"""
Returns the name for our button
"""
return self.submit_button_name
def get_context_data(self, **kwargs):
context = super(SmartFormMixin, self).get_context_data(**kwargs)
context['submit_button_name'] = self.derive_submit_button_name()
return context
class SmartFormView(SmartFormMixin, SmartView, FormView):
default_template = 'smartmin/form.html'
def form_valid(self, form):
# plug in our success message
messages.success(self.request, self.derive_success_message())
return super(SmartFormView, self).form_valid(form)
class SmartModelFormView(SmartFormMixin, SmartView, ModelFormMixin):
grant_permissions = None
javascript_submit = None
field_config = { 'modified_blurb': dict(label="Modified"),
'created_blurb': dict(label="Created") }
def derive_title(self):
"""
Derives our title from our object
"""
if not self.title:
return _("Edit %s") % force_text(self.model._meta.verbose_name).title()
else:
return self.title
def pre_save(self, obj):
"""
Called before an object is saved away
"""
return obj
def save(self, obj):
"""
        Actually does the saving of this object; this is when the object is committed
"""
self.object.save()
self.save_m2m()
def form_valid(self, form):
self.object = form.save(commit=False)
try:
self.object = self.pre_save(self.object)
self.save(self.object)
self.object = self.post_save(self.object)
messages.success(self.request, self.derive_success_message())
if 'HTTP_X_FORMAX' not in self.request.META:
return HttpResponseRedirect(self.get_success_url())
else:
response = self.render_to_response(self.get_context_data(form=form))
response['REDIRECT'] = self.get_success_url()
return response
except IntegrityError as e:
message = str(e).capitalize()
errors = self.form._errors.setdefault(forms.forms.NON_FIELD_ERRORS, forms.utils.ErrorList())
errors.append(message)
return self.render_to_response(self.get_context_data(form=form))
def save_m2m(self):
"""
        By default saves the form's m2m relations; can be overridden if a more complicated m2m model exists
"""
self.form.save_m2m()
def post_save(self, obj):
"""
Called after an object is successfully saved
"""
# if we have permissions to grant, do so
if self.grant_permissions:
for permission in self.grant_permissions:
# if the user doesn't have this permission globally already
if not self.request.user.has_perm(permission):
# then assign it for this object
assign_perm(permission, self.request.user, self.object)
return obj
def get_context_data(self, **kwargs):
context = super(SmartModelFormView, self).get_context_data(**kwargs)
context['javascript_submit'] = self.javascript_submit
return context
class SmartUpdateView(SmartModelFormView, UpdateView):
default_template = 'smartmin/update.html'
exclude = ('created_by', 'modified_by')
submit_button_name = _("Save Changes")
    # allows you to specify the name of the URL to use for a delete link that will automatically be shown
delete_url = None
def derive_queryset(self):
return super(SmartUpdateView, self).get_queryset()
def get_queryset(self):
self.queryset = self.derive_queryset()
return self.queryset
@classmethod
def derive_url_pattern(cls, path, action):
"""
Returns the URL pattern for this view.
"""
return r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
def derive_success_message(self):
# First check whether a default message has been set
if self.success_message is None:
return "Your %s has been updated." % self.model._meta.verbose_name
else:
return self.success_message
def pre_save(self, obj):
# auto populate modified_by if it is present
if hasattr(obj, 'modified_by_id') and self.request.user.id >= 0:
obj.modified_by = self.request.user
return obj
def get_context_data(self, **kwargs):
context = super(SmartUpdateView, self).get_context_data(**kwargs)
if self.delete_url:
context['delete_url'] = smart_url(self.delete_url, self.object)
return context
def get_modified_blurb(self, obj):
return "%s by %s" % (obj.modified_on.strftime("%B %d, %Y at %I:%M %p"), obj.modified_by)
def get_created_blurb(self, obj):
return "%s by %s" % (obj.created_on.strftime("%B %d, %Y at %I:%M %p"), obj.created_by)
class SmartMultiFormView(SmartView, TemplateView):
default_template = 'smartmin/multi_form.html'
forms = {}
    # allows you to specify the name of the URL to use for a delete link that will automatically be shown
delete_url = None
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
page_forms = []
for prefix, form in self.forms.items():
f = form(prefix=prefix)
page_forms.append(f)
context['forms'] = page_forms
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
# process our forms
page_forms = []
valid = True
for prefix, form in self.forms.items():
f = form(request.POST, prefix=prefix)
valid = valid and f.is_valid()
page_forms.append(f)
if not valid:
context['forms'] = page_forms
return self.render_to_response(context)
else:
# redirect to success page
pass
def get_context_data(self, **kwargs):
context = super(SmartMultiFormView, self).get_context_data(**kwargs)
if self.delete_url:
context['delete_url'] = smart_url(self.delete_url, self.object)
return context
class SmartCreateView(SmartModelFormView, CreateView):
default_template = 'smartmin/create.html'
exclude = ('created_by', 'modified_by', 'is_active')
submit_button_name = _("Create")
def pre_save(self, obj):
# auto populate created_by if it is present
if hasattr(obj, 'created_by_id') and self.request.user.id >= 0:
obj.created_by = self.request.user
# auto populate modified_by if it is present
if hasattr(obj, 'modified_by_id') and self.request.user.id >= 0:
obj.modified_by = self.request.user
return obj
def derive_success_message(self):
# First check whether a default message has been set
if self.success_message is None:
return _("Your new %s has been created.") % self.model._meta.verbose_name
else:
return self.success_message
def derive_title(self):
"""
Derives our title from our object
"""
if not self.title:
return _("Create %s") % force_text(self.model._meta.verbose_name).title()
else:
return self.title
class SmartCSVImportView(SmartCreateView):
success_url = 'id@csv_imports.importtask_read'
fields = ('csv_file',)
def derive_title(self):
return _("Import %s") % self.crudl.model._meta.verbose_name_plural.title()
def pre_save(self, obj):
obj = super(SmartCSVImportView, self).pre_save(obj)
obj.model_class = "%s.%s" % (self.crudl.model.__module__, self.crudl.model.__name__)
return obj
def post_save(self, task):
task = super(SmartCSVImportView, self).post_save(task)
task.import_params = json.dumps(self.form.data)
# kick off our CSV import
task.start()
return task
class SmartCRUDL(object):
actions = ('create', 'read', 'update', 'delete', 'list')
model_name = None
app_name = None
module_name = None
path = None
permissions = True
def __init__(self, model=None, path=None, actions=None):
# set our model if passed in
if model:
self.model = model
# derive our model name
if not self.model_name:
self.model_name = self.model._meta.object_name
# derive our app name
if not self.app_name:
self.app_name = self.model._meta.app_label
# derive our path from our class name
if not path and not self.path:
self.path = self.model_name.lower()
# derive our module name from our class's module
if not self.module_name:
parts = self.__class__.__module__.split(".")
self.module_name = parts[-2]
            # deal with the special case of views subdirectories; we need to go up one more level to find the real module
if self.module_name == 'views' and len(parts) >= 3:
self.module_name = parts[-3]
# set our actions if set
if actions:
self.actions = actions
def permission_for_action(self, action):
"""
Returns the permission to use for the passed in action
"""
return "%s.%s_%s" % (self.app_name.lower(), self.model_name.lower(), action)
def template_for_action(self, action):
"""
Returns the template to use for the passed in action
"""
return "%s/%s_%s.html" % (self.module_name.lower(), self.model_name.lower(), action)
def url_name_for_action(self, action):
"""
Returns the reverse name for this action
"""
return "%s.%s_%s" % (self.module_name.lower(), self.model_name.lower(), action)
def view_for_action(self, action):
"""
Returns the appropriate view class for the passed in action
"""
# this turns replace_foo into ReplaceFoo and read into Read
class_name = "".join([word.capitalize() for word in action.split("_")])
view = None
# see if we have a custom class defined for this action
if hasattr(self, class_name):
# return that one
view = getattr(self, class_name)
# no model set? set it ourselves
if not getattr(view, 'model', None):
view.model = self.model
# no permission and we are supposed to set them, do so
if not hasattr(view, 'permission') and self.permissions:
view.permission = self.permission_for_action(action)
# set our link URL based on read and update
if not getattr(view, 'link_url', None):
if 'read' in self.actions:
view.link_url = 'id@%s' % self.url_name_for_action('read')
elif 'update' in self.actions:
view.link_url = 'id@%s' % self.url_name_for_action('update')
# if we can't infer a link URL then view class must override lookup_field_link
if not getattr(view, 'link_url', None) and 'lookup_field_link' not in view.__dict__:
view.link_fields = ()
# set add_button based on existence of Create view if add_button not explicitly set
if action == 'list' and getattr(view, 'add_button', None) is None:
view.add_button = 'create' in self.actions
# set edit_button based on existence of Update view if edit_button not explicitly set
if action == 'read' and getattr(view, 'edit_button', None) is None:
view.edit_button = 'update' in self.actions
# if update or create, set success url if not set
if not getattr(view, 'success_url', None) and (action == 'update' or action == 'create'):
view.success_url = '@%s' % self.url_name_for_action('list')
# otherwise, use our defaults
else:
options = dict(model=self.model)
# if this is an update or create, and we have a list view, then set the default to that
            if (action == 'update' or action == 'create') and 'list' in self.actions:
options['success_url'] = '@%s' % self.url_name_for_action('list')
# set permissions if appropriate
if self.permissions:
options['permission'] = self.permission_for_action(action)
if action == 'create':
view = type(str("%sCreateView" % self.model_name), (SmartCreateView,), options)
elif action == 'read':
if 'update' in self.actions:
options['edit_button'] = True
view = type(str("%sReadView" % self.model_name), (SmartReadView,), options)
elif action == 'update':
if 'delete' in self.actions:
options['delete_url'] = 'id@%s' % self.url_name_for_action('delete')
view = type(str("%sUpdateView" % self.model_name), (SmartUpdateView,), options)
elif action == 'delete':
if 'list' in self.actions:
options['cancel_url'] = '@%s' % self.url_name_for_action('list')
options['redirect_url'] = '@%s' % self.url_name_for_action('list')
elif 'update' in self.actions:
options['cancel_url'] = '@%s' % self.url_name_for_action('update')
view = type(str("%sDeleteView" % self.model_name), (SmartDeleteView,), options)
elif action == 'list':
if 'read' in self.actions:
options['link_url'] = 'id@%s' % self.url_name_for_action('read')
elif 'update' in self.actions:
options['link_url'] = 'id@%s' % self.url_name_for_action('update')
else:
options['link_fields'] = ()
if 'create' in self.actions:
options['add_button'] = True
view = type(str("%sListView" % self.model_name), (SmartListView,), options)
elif action == 'csv_import':
options['model'] = ImportTask
view = type(str("%sCSVImportView" % self.model_name), (SmartCSVImportView,), options)
if not view:
# couldn't find a view? blow up
raise Exception("No view found for action: %s" % action)
# set the url name for this view
view.url_name = self.url_name_for_action(action)
# no template set for it? set one based on our action and app name
if not getattr(view, 'template_name', None):
view.template_name = self.template_for_action(action)
view.crudl = self
return view
def pattern_for_view(self, view, action):
"""
Returns the URL pattern for the passed in action.
"""
# if this view knows how to define a URL pattern, call that
if getattr(view, 'derive_url_pattern', None):
return view.derive_url_pattern(self.path, action)
# otherwise take our best guess
else:
return r'^%s/%s/$' % (self.path, action)
def as_urlpatterns(self):
"""
Creates the appropriate URL patterns for this object.
"""
urlpatterns = patterns('')
# for each of our actions
for action in self.actions:
view_class = self.view_for_action(action)
view_pattern = self.pattern_for_view(view_class, action)
name = self.url_name_for_action(action)
urlpatterns += patterns('', url(view_pattern, view_class.as_view(), name=name))
return urlpatterns
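# Minimal usage sketch (illustrative only; `Country` is a hypothetical model and
# this code belongs in an app's views.py/urls.py, not in this module). A
# SmartCRUDL subclass bundles the generated create/read/update/delete/list views
# for one model and exposes them as URL patterns:
#
#     from smartmin.views import SmartCRUDL, SmartListView
#     from .models import Country
#
#     class CountryCRUDL(SmartCRUDL):
#         model = Country
#         actions = ('create', 'read', 'update', 'list')
#
#         # nested view classes named after the action override the generated defaults
#         class List(SmartListView):
#             fields = ('name', 'code')
#             search_fields = ('name__icontains', 'code__icontains')
#
#     urlpatterns = CountryCRUDL().as_urlpatterns()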
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass
def define_options(parser):
return
def create_system(options, full_system, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory':
panic("This script requires the MOESI_CMP_directory protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
# the ruby random tester reuses num_cpus to specify the
# number of cpu ports connected to the tester object, which
# is stored in system.cpu. because there is only ever one
# tester object, num_cpus is not necessarily equal to the
# size of system.cpu; therefore if len(system.cpu) == 1
# we use system.cpu[0] to set the clk_domain, thereby ensuring
# we don't index off the end of the cpu list.
if len(system.cpu) == 1:
clk_domain = system.cpu[0].clk_domain
else:
clk_domain = system.cpu[i].clk_domain
l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache,
L1Dcache=l1d_cache,
l2_select_num_bits=l2_bits,
send_evictions=send_evicts(options),
transitions_per_cycle=options.ports,
clk_domain=clk_domain,
ruby_system=ruby_system)
cpu_seq = RubySequencer(version=i, icache=l1i_cache,
dcache=l1d_cache, clk_domain=clk_domain,
ruby_system=ruby_system)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
# Add controllers and sequencers to the appropriate lists
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.mandatoryQueue = MessageBuffer()
l1_cntrl.requestFromL1Cache = MessageBuffer()
l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
l1_cntrl.responseFromL1Cache = MessageBuffer()
l1_cntrl.responseFromL1Cache.master = ruby_system.network.slave
l1_cntrl.requestToL1Cache = MessageBuffer()
l1_cntrl.requestToL1Cache.slave = ruby_system.network.master
l1_cntrl.responseToL1Cache = MessageBuffer()
l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
l1_cntrl.triggerQueue = MessageBuffer(ordered = True)
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
#
# First create the Ruby objects associated with this cpu
#
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(version = i,
L2cache = l2_cache,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.GlobalRequestFromL2Cache = MessageBuffer()
l2_cntrl.GlobalRequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = MessageBuffer()
l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave
l2_cntrl.GlobalRequestToL2Cache = MessageBuffer()
l2_cntrl.GlobalRequestToL2Cache.slave = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = MessageBuffer()
l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
l2_cntrl.responseToL2Cache = MessageBuffer()
l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
l2_cntrl.triggerQueue = MessageBuffer(ordered = True)
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
dir_cntrl_nodes = create_directories(options, system.mem_ranges,
ruby_system)
for dir_cntrl in dir_cntrl_nodes:
# Connect the directory controllers and the network
dir_cntrl.requestToDir = MessageBuffer()
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.responseToDir = MessageBuffer()
dir_cntrl.responseToDir.slave = ruby_system.network.master
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.forwardFromDir = MessageBuffer()
dir_cntrl.forwardFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromMemory = MessageBuffer()
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system,
slave = dma_port)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the dma controller to the network
dma_cntrl.mandatoryQueue = MessageBuffer()
dma_cntrl.responseFromDir = MessageBuffer()
dma_cntrl.responseFromDir.slave = ruby_system.network.master
dma_cntrl.reqToDir = MessageBuffer()
dma_cntrl.reqToDir.master = ruby_system.network.slave
dma_cntrl.respToDir = MessageBuffer()
dma_cntrl.respToDir.master = ruby_system.network.slave
dma_cntrl.triggerQueue = MessageBuffer(ordered = True)
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.mandatoryQueue = MessageBuffer()
io_controller.responseFromDir = MessageBuffer()
io_controller.responseFromDir.slave = ruby_system.network.master
io_controller.reqToDir = MessageBuffer()
io_controller.reqToDir.master = ruby_system.network.slave
io_controller.respToDir = MessageBuffer()
io_controller.respToDir.master = ruby_system.network.slave
io_controller.triggerQueue = MessageBuffer(ordered = True)
all_cntrls = all_cntrls + [io_controller]
ruby_system.network.number_of_virtual_networks = 3
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
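# Usage note (illustrative sketch, not part of this script): create_system() is
# normally invoked by the shared Ruby configuration code rather than run
# directly, along the lines of
#
#     import MOESI_CMP_directory
#     (cpu_sequencers, dir_cntrl_nodes, topology) = \
#         MOESI_CMP_directory.create_system(options, full_system, system,
#                                           dma_ports, ruby_system)
#
# after which the returned sequencers, directory controllers and topology are
# used to finish constructing the Ruby network and connect it to the CPUs.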
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.dsl import dsl
from murano.dsl import exceptions
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestContracts(test_case.DslTestCase):
def setUp(self):
super(TestContracts, self).setUp()
self._runner = self.new_runner(
om.Object(
'ContractExamples',
ordinaryProperty='PROPERTY',
sampleClass=om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=om.Object(
'SampleClass2',
class2Property='string2'))))
def test_string_contract(self):
result = self._runner.testStringContract('qwerty')
self.assertIsInstance(result, six.string_types)
self.assertEqual('qwerty', result)
def test_string_from_number_contract(self):
result = self._runner.testStringContract(123)
self.assertIsInstance(result, six.string_types)
self.assertEqual('123', result)
def test_string_null_contract(self):
self.assertIsNone(self._runner.testStringContract(None))
def test_int_contract(self):
result = self._runner.testIntContract(123)
self.assertIsInstance(result, int)
self.assertEqual(123, result)
def test_int_from_string_contract(self):
result = self._runner.testIntContract('456')
self.assertIsInstance(result, int)
self.assertEqual(456, result)
def test_int_from_string_contract_failure(self):
self.assertRaises(exceptions.ContractViolationException,
self._runner.testIntContract, 'nan')
def test_int_null_contract(self):
self.assertIsNone(self._runner.testIntContract(None))
def test_bool_contract(self):
result = self._runner.testBoolContract(True)
self.assertIsInstance(result, bool)
self.assertTrue(result)
result = self._runner.testBoolContract(False)
self.assertIsInstance(result, bool)
self.assertFalse(result)
def test_bool_from_int_contract(self):
result = self._runner.testBoolContract(10)
self.assertIsInstance(result, bool)
self.assertTrue(result)
result = self._runner.testBoolContract(0)
self.assertIsInstance(result, bool)
self.assertFalse(result)
def test_bool_from_string_contract(self):
result = self._runner.testBoolContract('something')
self.assertIsInstance(result, bool)
self.assertTrue(result)
result = self._runner.testBoolContract('')
self.assertIsInstance(result, bool)
self.assertFalse(result)
def test_bool_null_contract(self):
        self.assertIsNone(self._runner.testBoolContract(None))
def test_class_contract(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
result = self._runner.testClassContract(arg)
self.assertIsInstance(result, dsl.MuranoObjectInterface)
def test_class_contract_by_ref(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
result = self._runner.testClassContract(arg)
self.assertEqual(arg.id, result.id)
def test_class_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testClassContract, ['invalid type'])
def test_class_contract_by_ref_failure(self):
self.assertRaises(
exceptions.NoObjectFoundError,
self._runner.testClassContract, 'NoSuchIdExists')
def test_class_contract_from_dict(self):
self.assertEqual(
'SampleClass2',
self._runner.testClassContract({
'class2Property': 'str'}).type.name)
def test_class_from_id_contract(self):
object_id = self._runner.root.get_property('sampleClass').object_id
result = self._runner.testClassFromIdContract(object_id)
self.assertIsInstance(result, dsl.MuranoObjectInterface)
self.assertEqual(object_id, result.id)
def test_check_contract(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
self.assertIsNone(self._runner.testCheckContract(arg, 100))
def test_check_contract_failure(self):
invalid_arg = om.Object('SampleClass2', class2Property='not qwerty')
self.assertRaises(exceptions.ContractViolationException,
self._runner.testCheckContract, invalid_arg, 100)
def test_owned_contract(self):
arg1 = self._runner.root.get_property('sampleClass')
arg2 = arg1.get_property('classProperty')
self.assertIsNone(self._runner.testOwnedContract(arg1, arg2))
def test_owned_contract_on_null(self):
self.assertIsNone(self._runner.testOwnedContract(None, None))
def test_owned_contract_failure(self):
arg1 = self._runner.root.get_property('sampleClass')
arg2 = arg1.get_property('classProperty')
invalid_arg2 = om.Object('SampleClass2', class2Property='string2')
invalid_arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=invalid_arg2)
self.assertRaises(exceptions.ContractViolationException,
self._runner.testOwnedContract, invalid_arg1, arg2)
self.assertRaises(exceptions.ContractViolationException,
self._runner.testOwnedContract, invalid_arg2, arg1)
def test_not_owned_contract(self):
arg2 = om.Object('SampleClass2', class2Property='string2')
arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=arg2)
self.assertIsNone(self._runner.testNotOwnedContract(arg1, arg2))
def test_not_owned_contract_on_null(self):
self.assertIsNone(self._runner.testNotOwnedContract(None, None))
def test_not_owned_contract_failure(self):
invalid_arg1 = self._runner.root.get_property('sampleClass')
invalid_arg2 = invalid_arg1.get_property('classProperty')
arg2 = om.Object('SampleClass2', class2Property='string2')
arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=arg2)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotOwnedContract, invalid_arg1, arg2)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotOwnedContract, invalid_arg2, arg1)
def test_scalar_contract(self):
self.assertEqual('fixed', self._runner.testScalarContract(
'fixed', 456, True))
def test_scalar_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'wrong', 456, True)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'fixed', 123, True)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'fixed', 456, False)
def test_list_contract(self):
self.assertEqual([3, 2, 1], self._runner.testListContract(
['3', 2, '1']))
def test_list_contract_from_scalar(self):
self.assertEqual([99], self._runner.testListContract('99'))
def test_list_contract_from_null(self):
self.assertEqual([], self._runner.testListContract(None))
def test_list_with_min_length_contract(self):
self.assertEqual(
[1, 2, 3],
self._runner.testListWithMinLengthContract([1, 2, 3]))
self.assertEqual(
[1, 2, 3, 4],
self._runner.testListWithMinLengthContract([1, 2, 3, 4]))
def test_list_with_min_length_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinLengthContract, None)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinLengthContract, [1, 2])
def test_list_with_min_max_length_contract(self):
self.assertEqual(
[1, 2],
self._runner.testListWithMinMaxLengthContract([1, 2]))
self.assertEqual(
[1, 2, 3, 4],
self._runner.testListWithMinMaxLengthContract([1, 2, 3, 4]))
def test_list_with_min_max_length_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinMaxLengthContract, [1])
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinMaxLengthContract, [1, 2, 3, 4, 5])
def test_dict_contract(self):
self.assertEqual(
{'A': '123', 'B': 456},
self._runner.testDictContract({'A': '123', 'B': '456'}))
self.assertEqual(
{'A': '123', 'B': 456},
self._runner.testDictContract({'A': '123', 'B': '456', 'C': 'qq'}))
self.assertEqual(
{'A': '123', 'B': None},
self._runner.testDictContract({'A': '123'}))
def test_dict_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testDictContract, 'str')
def test_dict_expressions_contract(self):
self.assertEqual(
{321: 'qwerty', 99: 'val', 'B': 456},
self._runner.testDictExprContract({
'321': 'qwerty', '99': 'val', 'B': 456}))
def test_dict_expressions_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testDictExprContract,
{'321': 'qwerty', 'str': 'val', 'B': 456})
def test_invalid_dict_expr_contract(self):
self.assertRaises(
exceptions.DslContractSyntaxError,
self._runner.testDictMultiExprContract,
{'321': 'qwerty', 'str': 'val', 'B': 456})
def test_not_null_contract(self):
self.assertEqual('value', self._runner.testNotNullContract('value'))
def test_not_null_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotNullContract, None)
def test_default(self):
self.assertEqual('value', self._runner.testDefault('value'))
self.assertEqual('DEFAULT', self._runner.testDefault())
def test_default_expression(self):
self.assertEqual('PROPERTY', self._runner.testDefaultExpression())
self.assertEqual('value', self._runner.testDefaultExpression('value'))
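# For orientation (an assumption about the test fixture, which is not shown in
# this file): ContractExamples and the SampleClass* classes are MuranoPL classes
# whose method arguments carry yaql contracts, conceptually along the lines of
#
#     Methods:
#       testStringContract:
#         Arguments:
#           - arg:
#               Contract: $.string()
#         Body:
#           Return: $arg
#
# so each test above feeds a value through one contract expression ($.string(),
# $.int(), $.bool(), $.class(...), etc.) and asserts on either the coerced
# result or the raised ContractViolationException.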
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from oslo_serialization import jsonutils
from heat.common import exception
from heat.common import template_format
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_port_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: net1234
fixed_ips:
- subnet: sub1234
ip_address: 10.0.3.21
device_owner: network:dhcp
'''
neutron_port_with_address_pair_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
allowed_address_pairs:
- ip_address: 10.0.3.21
mac_address: 00-B0-D0-86-BB-F7
'''
neutron_port_security_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
port_security_enabled: False
'''
class NeutronPortTest(common.HeatTestCase):
def setUp(self):
super(NeutronPortTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_port')
self.m.StubOutWithMock(neutronclient.Client, 'show_port')
self.m.StubOutWithMock(neutronclient.Client, 'update_port')
self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
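    # Each test below follows the usual mox record/replay/verify cycle: the
    # expected neutronclient calls (create_port, show_port, update_port, ...)
    # are recorded against the stubs created in setUp(), self.m.ReplayAll()
    # switches mox into replay mode, the port resource is then driven through
    # scheduler.TaskRunner, and self.m.VerifyAll() asserts that exactly the
    # recorded calls were made.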
def test_missing_subnet_id(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'fixed_ips': [
{'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['fixed_ips'][0].pop('subnet')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_missing_ip_address(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).MultipleTimes().AndReturn('sub1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'fixed_ips': [
{'subnet_id': u'sub1234'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['fixed_ips'][0].pop('ip_address')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_missing_fixed_ips(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_allowed_address_pair(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
'ip_address': u'10.0.3.21',
'mac_address': u'00-B0-D0-86-BB-F7'
}],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_with_address_pair_template)
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_port_security_enabled(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'port_security_enabled': False,
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_security_template)
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_missing_mac_address(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
'ip_address': u'10.0.3.21',
}],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_with_address_pair_template)
t['resources']['port']['properties']['allowed_address_pairs'][0].pop(
'mac_address'
)
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_ip_address_is_cidr(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
'ip_address': u'10.0.3.0/24',
'mac_address': u'00-B0-D0-86-BB-F7'
}],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "2e00180a-ff9d-42c4-b701-a0606b243447"
}})
neutronclient.Client.show_port(
'2e00180a-ff9d-42c4-b701-a0606b243447'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "2e00180a-ff9d-42c4-b701-a0606b243447"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_with_address_pair_template)
t['resources']['port']['properties'][
'allowed_address_pairs'][0]['ip_address'] = '10.0.3.0/24'
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def _mock_create_with_security_groups(self, port_prop):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).MultipleTimes().AndReturn('sub1234')
neutronclient.Client.create_port({'port': port_prop}).AndReturn(
{'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
def test_security_groups(self):
port_prop = {
'network_id': u'net1234',
'security_groups': ['8a2f582a-e1cd-480f-b85d-b02631c10656',
'024613dc-b489-4478-b46f-ada462738740'],
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
self._mock_create_with_security_groups(port_prop)
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656',
'024613dc-b489-4478-b46f-ada462738740']
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_security_groups_empty_list(self):
port_prop = {
'network_id': u'net1234',
'security_groups': [],
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
self._mock_create_with_security_groups(port_prop)
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['security_groups'] = []
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_create_and_update_port(self):
props = {'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
new_props = props.copy()
new_props['name'] = "new_name"
new_props['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656']
new_props_update = new_props.copy()
new_props_update.pop('network_id')
new_props1 = new_props.copy()
new_props1.pop('security_groups')
new_props_update1 = new_props_update.copy()
new_props_update1['security_groups'] = [
'0389f747-7785-4757-b7bb-2ab07e4b09c3']
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port(
{'port': props}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).MultipleTimes(
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': new_props_update}
).AndReturn(None)
fake_groups_list = {
'security_groups': [
{
'tenant_id': 'dc4b074874244f7693dd65583733a758',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'default',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
neutronclient.Client.list_security_groups().AndReturn(
fake_groups_list)
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': new_props_update1}
).AndReturn(None)
self.m.ReplayAll()
# create port
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
# update port
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
scheduler.TaskRunner(port.update, update_snippet)()
# update again to test port without security group
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props1)
scheduler.TaskRunner(port.update, update_snippet)()
self.m.VerifyAll()
def test_port_needs_update(self):
props = {'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port(
{'port': props}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
self.m.ReplayAll()
# create port
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
new_props = props.copy()
# test always replace
new_props['replacement_policy'] = 'REPLACE_ALWAYS'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
self.assertRaises(exception.UpdateReplace, port._needs_update,
update_snippet, port.frozen_definition(),
new_props, props, None)
# test deferring to Resource._needs_update
new_props['replacement_policy'] = 'AUTO'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
self.assertTrue(port._needs_update(update_snippet,
port.frozen_definition(),
new_props, props, None))
self.m.VerifyAll()
def test_get_port_attributes(self):
subnet_dict = {'name': 'test-subnet', 'enable_dhcp': True,
'network_id': 'net1234', 'dns_nameservers': [],
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24',
'allocation_pools': [{'start': '10.0.0.2',
'end': u'10.0.0.254'}],
'gateway_ip': '10.0.0.1', 'ipv6_address_mode': None,
'ip_version': 4, 'host_routes': [],
'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'}
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
'status': 'BUILD',
'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
}})
neutronclient.Client.show_subnet(
'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
).AndReturn({'subnet': subnet_dict})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).MultipleTimes().AndReturn({'port': {
'status': 'DOWN',
'name': utils.PhysName('test_stack', 'port'),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': 'net1234',
'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
'mac_address': 'fa:16:3e:75:67:60',
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}]
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertEqual('DOWN', port.FnGetAtt('status'))
self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
self.assertTrue(port.FnGetAtt('admin_state_up'))
self.assertEqual('net1234', port.FnGetAtt('network_id'))
self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
self.assertEqual(utils.PhysName('test_stack', 'port'),
port.FnGetAtt('name'))
self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
port.FnGetAtt('device_id'))
self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
port.FnGetAtt('tenant_id'))
self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
port.FnGetAtt('security_groups'))
self.assertEqual([{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}],
port.FnGetAtt('fixed_ips'))
self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
self.assertRaises(exception.InvalidTemplateAttribute,
port.FnGetAtt, 'Foo')
self.m.VerifyAll()
def test_subnet_attribute_exception(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
'status': 'BUILD',
'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).MultipleTimes().AndReturn({'port': {
'status': 'DOWN',
'name': utils.PhysName('test_stack', 'port'),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': 'net1234',
'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
'mac_address': 'fa:16:3e:75:67:60',
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}]
}})
neutronclient.Client.show_subnet(
'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
).AndRaise(qe.NeutronClientException('ConnectionFailed: Connection '
'to neutron failed: Maximum '
'attempts reached'))
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertIsNone(port.FnGetAtt('subnets'))
log_msg = ('Failed to fetch resource attributes: ConnectionFailed: '
'Connection to neutron failed: Maximum attempts reached')
self.assertIn(log_msg, self.LOG.output)
self.m.VerifyAll()
def test_vnic_create_update(self):
port_prop = {
'network_id': u'net1234',
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': 'network:dhcp',
'binding:vnic_type': 'direct'
}
new_port_prop = port_prop.copy()
new_port_prop['binding:vnic_type'] = 'normal'
new_port_prop['name'] = "new_name"
new_port_prop['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656']
new_port_prop.pop('network_id')
prop_update = copy.deepcopy(new_port_prop)
new_port_prop['replacement_policy'] = 'AUTO'
new_port_prop['network'] = u'net1234'
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).MultipleTimes().AndReturn('sub1234')
neutronclient.Client.create_port({'port': port_prop}).AndReturn(
{'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.stub_SubnetConstraint_validate()
self.stub_NetworkConstraint_validate()
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': prop_update}
).AndReturn(None)
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
prop_update2 = copy.deepcopy(prop_update)
prop_update2['binding:vnic_type'] = 'direct'
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': prop_update2}
).AndReturn(None)
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['binding:vnic_type'] = 'direct'
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertEqual('direct', port.properties['binding:vnic_type'])
# update to normal
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_port_prop)
new_port_prop2 = copy.deepcopy(new_port_prop)
scheduler.TaskRunner(port.update, update_snippet)()
self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
self.assertEqual('normal', port.properties['binding:vnic_type'])
# update back to direct
new_port_prop2['binding:vnic_type'] = 'direct'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_port_prop2)
scheduler.TaskRunner(port.update, update_snippet)()
self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
self.assertEqual('direct', port.properties['binding:vnic_type'])
self.m.VerifyAll()
def test_prepare_for_replace_port_not_created(self):
t = template_format.parse(neutron_port_template)
stack = utils.parse_stack(t)
port = stack['port']
port._show_resource = mock.Mock()
port.data_set = mock.Mock()
n_client = mock.Mock()
port.client = mock.Mock(return_value=n_client)
self.assertIsNone(port.resource_id)
# execute prepare_for_replace
port.prepare_for_replace()
        # check that prepare_for_replace() does nothing
        # when the port has not been created
self.assertFalse(port._show_resource.called)
self.assertFalse(port.data_set.called)
self.assertFalse(n_client.update_port.called)
def test_prepare_for_replace_port(self):
t = template_format.parse(neutron_port_template)
stack = utils.parse_stack(t)
port = stack['port']
port.resource_id = 'test_res_id'
_value = {
'fixed_ips': {
'subnet_id': 'test_subnet',
'ip_address': '42.42.42.42'
}
}
port._show_resource = mock.Mock(return_value=_value)
port.data_set = mock.Mock()
n_client = mock.Mock()
port.client = mock.Mock(return_value=n_client)
# execute prepare_for_replace
port.prepare_for_replace()
        # check that the fixed-ip data was stored
port.data_set.assert_called_once_with(
'port_fip', jsonutils.dumps(_value.get('fixed_ips')))
        # check that the port was updated and its fixed ips were removed
expected_props = {'port': {'fixed_ips': []}}
n_client.update_port.assert_called_once_with('test_res_id',
expected_props)
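    # Note (descriptive comment, not part of the original test suite):
    # prepare_for_replace() stashes the port's fixed IPs in resource data under
    # the 'port_fip' key and clears them on the live port so a replacement port
    # can claim the same addresses; restore_prev_rsrc(), exercised below, reads
    # that data back and re-applies it.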
def test_restore_prev_rsrc(self):
t = template_format.parse(neutron_port_template)
stack = utils.parse_stack(t)
new_port = stack['port']
new_port.resource_id = 'new_res_id'
# mock backup stack to return only one mocked old_port
old_port = mock.Mock()
new_port.stack._backup_stack = mock.Mock()
new_port.stack._backup_stack().resources.get.return_value = old_port
old_port.resource_id = 'old_res_id'
_value = {
'subnet_id': 'test_subnet',
'ip_address': '42.42.42.42'
}
old_port.data = mock.Mock(
return_value={'port_fip': jsonutils.dumps(_value)})
n_client = mock.Mock()
new_port.client = mock.Mock(return_value=n_client)
# execute restore_prev_rsrc
new_port.restore_prev_rsrc()
        # check that both ports were updated: the ip was removed from the
        # new port and restored on the old port
expected_new_props = {'port': {'fixed_ips': []}}
expected_old_props = {'port': {'fixed_ips': _value}}
n_client.update_port.assert_has_calls([
mock.call('new_res_id', expected_new_props),
mock.call('old_res_id', expected_old_props)])
def test_restore_prev_rsrc_convergence(self):
t = template_format.parse(neutron_port_template)
stack = utils.parse_stack(t)
stack.store()
# mock resource from previous template
prev_rsrc = stack['port']
prev_rsrc.resource_id = 'prev-rsrc'
# store in db
prev_rsrc.state_set(prev_rsrc.UPDATE, prev_rsrc.COMPLETE)
# mock resource from existing template and store in db
existing_rsrc = stack['port']
existing_rsrc.current_template_id = stack.t.id
existing_rsrc.resource_id = 'existing-rsrc'
existing_rsrc.state_set(existing_rsrc.UPDATE, existing_rsrc.COMPLETE)
# mock previous resource was replaced by existing resource
prev_rsrc.replaced_by = existing_rsrc.id
_value = {
'subnet_id': 'test_subnet',
'ip_address': '42.42.42.42'
}
prev_rsrc._data = {'port_fip': jsonutils.dumps(_value)}
n_client = mock.Mock()
prev_rsrc.client = mock.Mock(return_value=n_client)
# execute restore_prev_rsrc
prev_rsrc.restore_prev_rsrc(convergence=True)
expected_existing_props = {'port': {'fixed_ips': []}}
expected_prev_props = {'port': {'fixed_ips': _value}}
n_client.update_port.assert_has_calls([
mock.call(existing_rsrc.resource_id, expected_existing_props),
mock.call(prev_rsrc.resource_id, expected_prev_props)])
|
|
"""
The MIT License
Copyright (c) 2008 Gilad Raphaelli <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
if using < python2.5, http://code.activestate.com/recipes/523034/ works as a
pure python collections.defaultdict substitute
"""
#from collections import defaultdict
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
import MySQLdb
def longish(x):
if len(x):
try:
return long(x)
except ValueError:
return longish(x[:-1])
else:
raise ValueError
def hexlongish(x):
if len(x):
try:
return long(str(x), 16)
except ValueError:
return longish(x[:-1])
else:
raise ValueError
def parse_innodb_status(innodb_status_raw, innodb_version=1.0):
def sumof(status):
def new(*idxs):
return sum(map(lambda x: longish(status[x]), idxs))
return new
innodb_status = defaultdict(int)
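    # touching the key via defaultdict(int) guarantees 'active_transactions'
    # appears in the result (as 0) even if no ACTIVE transaction line is seen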
innodb_status['active_transactions']
individual_buffer_pool_info = False
for line in innodb_status_raw:
istatus = line.split()
isum = sumof(istatus)
# SEMAPHORES
if "Mutex spin waits" in line:
innodb_status['spin_waits'] += longish(istatus[3])
innodb_status['spin_rounds'] += longish(istatus[5])
innodb_status['os_waits'] += longish(istatus[8])
elif "RW-shared spins" in line:
if innodb_version == 1.0:
innodb_status['spin_waits'] += isum(2,8)
innodb_status['os_waits'] += isum(5,11)
elif innodb_version >= 5.5:
innodb_status['spin_waits'] += longish(istatus[2])
innodb_status['os_waits'] += longish(istatus[7])
elif "RW-excl spins" in line and innodb_version >= 5.5:
innodb_status['spin_waits'] += longish(istatus[2])
innodb_status['os_waits'] += longish(istatus[7])
# TRANSACTIONS
elif "Trx id counter" in line:
if innodb_version >= 5.6:
innodb_status['transactions'] += longish(istatus[3])
elif innodb_version == 5.5:
innodb_status['transactions'] += hexlongish(istatus[3])
else:
innodb_status['transactions'] += isum(3,4)
elif "Purge done for trx" in line:
if innodb_version >= 5.6:
innodb_status['transactions_purged'] += longish(istatus[6])
elif innodb_version == 5.5:
innodb_status['transactions_purged'] += hexlongish(istatus[6])
else:
innodb_status['transactions_purged'] += isum(6,7)
elif "History list length" in line:
innodb_status['history_list'] = longish(istatus[3])
elif "---TRANSACTION" in line and innodb_status['transactions']:
innodb_status['current_transactions'] += 1
if "ACTIVE" in line:
innodb_status['active_transactions'] += 1
elif "LOCK WAIT" in line and innodb_status['transactions']:
innodb_status['locked_transactions'] += 1
elif 'read views open inside' in line:
innodb_status['read_views'] = longish(istatus[0])
# FILE I/O
elif 'OS file reads' in line:
innodb_status['data_reads'] = longish(istatus[0])
innodb_status['data_writes'] = longish(istatus[4])
innodb_status['data_fsyncs'] = longish(istatus[8])
elif 'Pending normal aio' in line:
innodb_status['pending_normal_aio_reads'] = longish(istatus[4])
innodb_status['pending_normal_aio_writes'] = longish(istatus[7])
elif 'ibuf aio reads' in line:
innodb_status['pending_ibuf_aio_reads'] = longish(istatus[3])
innodb_status['pending_aio_log_ios'] = longish(istatus[6])
innodb_status['pending_aio_sync_ios'] = longish(istatus[9])
elif 'Pending flushes (fsync)' in line:
innodb_status['pending_log_flushes'] = longish(istatus[4])
innodb_status['pending_buffer_pool_flushes'] = longish(istatus[7])
# INSERT BUFFER AND ADAPTIVE HASH INDEX
elif 'merged recs' in line and innodb_version == 1.0:
innodb_status['ibuf_inserts'] = longish(istatus[0])
innodb_status['ibuf_merged'] = longish(istatus[2])
innodb_status['ibuf_merges'] = longish(istatus[5])
elif 'Ibuf: size' in line and innodb_version >= 5.5:
innodb_status['ibuf_merges'] = longish(istatus[10])
elif 'merged operations' in line and innodb_version >= 5.5:
in_merged = 1
elif 'delete mark' in line and 'in_merged' in vars() and innodb_version >= 5.5:
innodb_status['ibuf_inserts'] = longish(istatus[1])
innodb_status['ibuf_merged'] = 0
del in_merged
# LOG
elif "log i/o's done" in line:
innodb_status['log_writes'] = longish(istatus[0])
elif "pending log writes" in line:
innodb_status['pending_log_writes'] = longish(istatus[0])
innodb_status['pending_chkp_writes'] = longish(istatus[4])
elif "Log sequence number" in line:
if innodb_version >= 5.5:
innodb_status['log_bytes_written'] = longish(istatus[3])
else:
innodb_status['log_bytes_written'] = isum(3,4)
elif "Log flushed up to" in line:
if innodb_version >= 5.5:
innodb_status['log_bytes_flushed'] = longish(istatus[4])
else:
innodb_status['log_bytes_flushed'] = isum(4,5)
# BUFFER POOL AND MEMORY
elif "INDIVIDUAL BUFFER POOL INFO" in line:
# individual pools section. We only want to record the totals
# rather than each individual pool clobbering the totals
individual_buffer_pool_info = True
elif "Buffer pool size, bytes" in line and not individual_buffer_pool_info:
innodb_status['buffer_pool_pages_bytes'] = longish(istatus[4])
elif "Buffer pool size" in line and not individual_buffer_pool_info:
innodb_status['buffer_pool_pages_total'] = longish(istatus[3])
elif "Free buffers" in line and not individual_buffer_pool_info:
innodb_status['buffer_pool_pages_free'] = longish(istatus[2])
elif "Database pages" in line and not individual_buffer_pool_info:
innodb_status['buffer_pool_pages_data'] = longish(istatus[2])
elif "Modified db pages" in line and not individual_buffer_pool_info:
innodb_status['buffer_pool_pages_dirty'] = longish(istatus[3])
elif "Pages read" in line and "ahead" not in line and not individual_buffer_pool_info:
innodb_status['pages_read'] = longish(istatus[2])
innodb_status['pages_created'] = longish(istatus[4])
innodb_status['pages_written'] = longish(istatus[6])
# ROW OPERATIONS
elif 'Number of rows inserted' in line:
innodb_status['rows_inserted'] = longish(istatus[4])
innodb_status['rows_updated'] = longish(istatus[6])
innodb_status['rows_deleted'] = longish(istatus[8])
innodb_status['rows_read'] = longish(istatus[10])
elif "queries inside InnoDB" in line:
innodb_status['queries_inside'] = longish(istatus[0])
innodb_status['queries_queued'] = longish(istatus[4])
# Some more stats
innodb_status['transactions_unpurged'] = innodb_status['transactions'] - innodb_status['transactions_purged']
innodb_status['log_bytes_unflushed'] = innodb_status['log_bytes_written'] - innodb_status['log_bytes_flushed']
return innodb_status
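# Illustrative sketch (not part of the original script): the parser consumes the
# raw text lines of SHOW ENGINE INNODB STATUS. The sample lines below are made
# up but follow the format the parser expects.
#
#   sample = [
#       "Mutex spin waits 100, rounds 250, OS waits 10",
#       "History list length 42",
#   ]
#   status = parse_innodb_status(sample)
#   # status['spin_waits'] == 100, status['spin_rounds'] == 250,
#   # status['os_waits'] == 10, status['history_list'] == 42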
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-H", "--Host", dest="host", help="Host running mysql", default="localhost")
parser.add_option("-u", "--user", dest="user", help="user to connect as", default="")
parser.add_option("-p", "--password", dest="passwd", help="password", default="")
(options, args) = parser.parse_args()
try:
conn = MySQLdb.connect(user=options.user, host=options.host, passwd=options.passwd)
cursor = conn.cursor(MySQLdb.cursors.Cursor)
cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
innodb_status = parse_innodb_status(cursor.fetchone()[0].split('\n'))
cursor.close()
conn.close()
except MySQLdb.OperationalError, (errno, errmsg):
raise
|
|
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import html
from itertools import chain
from mistletoe import block_token, span_token
from mistletoe.base_renderer import BaseRenderer
import sys
class XWiki20Renderer(BaseRenderer):
"""
XWiki syntax 2.0 renderer class.
See mistletoe.base_renderer module for more info.
"""
def __init__(self, *extras):
"""
Args:
extras (list): allows subclasses to add even more custom tokens.
"""
self.listTokens = []
self.lastChildOfQuotes = []
self.firstChildOfListItems = []
localExtras = [block_token.HTMLBlock, span_token.HTMLSpan, span_token.XWikiBlockMacroStart, span_token.XWikiBlockMacroEnd]
super().__init__(*chain(localExtras, extras))
def render_strong(self, token):
template = '**{}**'
return template.format(self.render_inner(token))
def render_emphasis(self, token):
template = '//{}//'
return template.format(self.render_inner(token))
def render_inline_code(self, token):
# Note: XWiki also offers preformatted text syntax ('##{}##') as a shorter alternative.
# We would have to escape the raw text when using it.
template = '{{{{code}}}}{}{{{{/code}}}}'
return template.format(self.render_raw_text(token.children[0], False))
def render_strikethrough(self, token):
template = '--{}--'
return template.format(self.render_inner(token))
def render_image(self, token):
template = '[[image:{src}]]'
inner = self.render_inner(token)
return template.format(src=token.src)
def render_link(self, token):
template = '[[{inner}>>{target}]]'
target = escape_url(token.target)
inner = self.render_inner(token)
return template.format(target=target, inner=inner)
def render_auto_link(self, token):
template = '[[{target}]]'
target = escape_url(token.target)
return template.format(target=target)
def render_escape_sequence(self, token):
return '~' + self.render_inner(token)
def render_raw_text(self, token, escape=True):
return (token.content.replace('~', '~~')
            # Note: It's probably better to leave potential XWiki macros as-is, i.e. don't escape their markers:
#.replace('{{', '~{{').replace('}}', '~}}')
.replace('[[', '~[[').replace(']]', '~]]')
.replace('**', '~**').replace('//', '~//')
.replace('##', '~##').replace('--', '~--')
) if escape else token.content
def render_x_wiki_block_macro_start(self, token):
return token.content + '\n'
def render_x_wiki_block_macro_end(self, token):
return '\n' + token.content
def render_html_span(self, token):
# XXX: HTMLSpan parses (contains) only individual opening and closing tags
# => no easy way to wrap the whole HTML code into {{html}} like this:
#
# template = '{{{{html wiki="true"}}}}{}{{{{/html}}}}'
# return template.format(token.content)
#
# => Users must do this themselves after the conversion.
return token.content
def render_html_block(self, token):
template = '{{{{html wiki="true"}}}}\n{}\n{{{{/html}}}}' + self._block_eol(token)
return template.format(token.content)
def render_heading(self, token):
template = '{level} {inner} {level}'
inner = self.render_inner(token)
return template.format(level='=' * token.level, inner=inner) + self._block_eol(token)
def render_quote(self, token):
self.lastChildOfQuotes.append(token.children[-1])
inner = self.render_inner(token)
del (self.lastChildOfQuotes[-1])
return (''.join(map(lambda line: '>{}{}'.format('' if line.startswith('>') else ' ', line), inner.splitlines(keepends=True)))
+ self._block_eol(token)[0:-1])
def render_paragraph(self, token):
return '{}'.format(self.render_inner(token)) + self._block_eol(token)
def render_block_code(self, token):
template = '{{{{code{attr}}}}}\n{inner}{{{{/code}}}}' + self._block_eol(token)
if token.language:
attr = ' language="{}"'.format(token.language)
else:
attr = ''
inner = self.render_raw_text(token.children[0], False)
return template.format(attr=attr, inner=inner)
def render_list(self, token):
inner = self.render_inner(token)
return inner + self._block_eol(token)[0:-1]
def render_list_item(self, token):
template = '{prefix} {inner}\n'
prefix = ''.join(self.listTokens)
if '1' in self.listTokens:
prefix += '.'
self.firstChildOfListItems.append(token.children[0])
inner = self.render_inner(token)
del (self.firstChildOfListItems[-1])
result = template.format(prefix=prefix, inner=inner.rstrip())
return result
def render_inner(self, token):
if isinstance(token, block_token.List):
if token.start:
self.listTokens.append('1')
else:
self.listTokens.append('*')
rendered = [self.render(child) for child in token.children]
wrap = False
if isinstance(token, block_token.BlockToken) and len(token.children) > 1:
# test what follows after the 1st child of this block token - wrap it to a XWiki block right after the 1st child if necessary
for child in token.children[1:]:
if isinstance(token, block_token.ListItem) and not isinstance(child, block_token.List):
# Note: Nested list within a list item is OK, because it does its own wrapping if necessary.
wrap = True
break
if isinstance(token, (block_token.TableCell)) and isinstance(child, block_token.BlockToken):
                    # Note: By design, Markdown doesn't support multiple lines in one cell, but they can be enforced by using HTML.
                    # See e.g. https://stackoverflow.com/questions/19950648/how-to-write-lists-inside-a-markdown-table.
wrap = True
break
if isinstance(token, block_token.List):
del (self.listTokens[-1])
return (''.join(rendered) if not wrap
else '{head}(((\n{tail}\n)))\n'.format(head=rendered[0].rstrip(), tail=''.join(rendered[1:]).rstrip()))
def render_table(self, token):
# Copied from JIRARenderer...
#
# This is actually gross and I wonder if there's a better way to do it.
#
# The primary difficulty seems to be passing down alignment options to
# reach individual cells.
template = '{inner}\n'
if hasattr(token, 'header'):
head_template = '{inner}'
header = token.header
head_inner = self.render_table_row(header, True)
head_rendered = head_template.format(inner=head_inner)
else:
head_rendered = ''
body_template = '{inner}'
body_inner = self.render_inner(token)
body_rendered = body_template.format(inner=body_inner)
return template.format(inner=head_rendered+body_rendered)
def render_table_row(self, token, is_header=False):
if is_header:
template = '{inner}\n'
else:
template = '{inner}\n'
inner = ''.join([self.render_table_cell(child, is_header)
for child in token.children])
return template.format(inner=inner)
def render_table_cell(self, token, in_header=False):
if in_header:
template = '|={inner}'
else:
template = '|{inner}'
inner = self.render_inner(token)
return template.format(inner=inner)
@staticmethod
def render_thematic_break(token):
return '----\n'
@staticmethod
def render_line_break(token):
return ' ' if token.soft else '\n'
def render_document(self, token):
self.footnotes.update(token.footnotes)
return self.render_inner(token)
def _block_eol(self, token):
return ('\n' if ((len(self.firstChildOfListItems) > 0 and token is self.firstChildOfListItems[-1])
or (len(self.lastChildOfQuotes) > 0 and token is self.lastChildOfQuotes[-1])) else '\n\n')
def escape_url(raw):
"""
Escape urls to prevent code injection craziness. (Hopefully.)
"""
from urllib.parse import quote
return quote(html.unescape(raw), safe='/#:()*?=%@+,&;')
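# Usage sketch (an assumption about how this renderer is wired up, following the
# standard mistletoe renderer pattern; the import path is illustrative and the
# XWikiBlockMacroStart/End span tokens registered in __init__ must be available).
# Renderers are used as context managers so the extra tokens get cleaned up:
#
#   from mistletoe import Document
#   from xwiki20_renderer import XWiki20Renderer
#
#   with XWiki20Renderer() as renderer:
#       xwiki_markup = renderer.render(Document(['# Title\n', '\n', 'Some **bold** text\n']))
#   # -> roughly '= Title =\n\nSome **bold** text\n\n'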
|
|
# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
VERSION = '1.3.1'
LOG = logging.getLogger(__name__)
@interface.volumedriver
class NexentaISCSIDriver(driver.ISCSIDriver):
"""Executes volume driver commands on Nexenta Appliance.
Version history:
.. code-block:: none
1.0.0 - Initial driver version.
1.0.1 - Fixed bug #1236626: catch "does not exist" exception of
lu_exists.
1.1.0 - Changed class name to NexentaISCSIDriver.
1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy.
1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs
clone.
1.1.3 - Extended volume stats provided by _update_volume_stats method.
1.2.0 - Added volume migration with storage assist method.
1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location
of migrated volume; after migrating volume migrate_volume
destroy snapshot on migration destination.
1.3.0 - Added retype method.
1.3.0.1 - Target creation refactor.
1.3.1 - Added ZFS cleanup.
"""
VERSION = VERSION
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Nexenta_CI"
def __init__(self, *args, **kwargs):
super(NexentaISCSIDriver, self).__init__(*args, **kwargs)
self.nms = None
self.targets = {}
if self.configuration:
self.configuration.append_config_values(
options.NEXENTA_CONNECTION_OPTS)
self.configuration.append_config_values(
options.NEXENTA_ISCSI_OPTS)
self.configuration.append_config_values(
options.NEXENTA_DATASET_OPTS)
self.configuration.append_config_values(
options.NEXENTA_RRMGR_OPTS)
self.nms_protocol = self.configuration.nexenta_rest_protocol
self.nms_host = self.configuration.nexenta_host
self.nms_port = self.configuration.nexenta_rest_port
self.nms_user = self.configuration.nexenta_user
self.nms_password = self.configuration.nexenta_password
self.volume = self.configuration.nexenta_volume
self.volume_compression = (
self.configuration.nexenta_dataset_compression)
self.volume_deduplication = self.configuration.nexenta_dataset_dedup
self.volume_description = (
self.configuration.nexenta_dataset_description)
self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
self.iscsi_target_portal_port = (
self.configuration.nexenta_iscsi_target_portal_port)
self._needless_objects = set()
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
def do_setup(self, context):
if self.nms_protocol == 'auto':
protocol, auto = 'http', True
else:
protocol, auto = self.nms_protocol, False
self.nms = jsonrpc.NexentaJSONProxy(
protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user,
self.nms_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(self.volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA") %
self.volume)
def _get_zvol_name(self, volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (self.volume, volume_name)
def _create_target(self, target_idx):
target_name = '%s%s-%i' % (
self.configuration.nexenta_target_prefix,
self.nms_host,
target_idx
)
target_group_name = self._get_target_group_name(target_name)
if not self._target_exists(target_name):
try:
self.nms.iscsitarget.create_target({
'target_name': target_name})
except exception.NexentaException as exc:
if 'already' in exc.args[0]:
LOG.info(_LI('Ignored target creation error "%s" while '
'ensuring export.'),
exc)
else:
raise
if not self._target_group_exists(target_group_name):
try:
self.nms.stmf.create_targetgroup(target_group_name)
except exception.NexentaException as exc:
if ('already' in exc.args[0]):
LOG.info(_LI('Ignored target group creation error "%s" '
'while ensuring export.'),
exc)
else:
raise
if not self._target_member_in_target_group(target_group_name,
target_name):
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except exception.NexentaException as exc:
if ('already' in exc.args[0]):
LOG.info(_LI('Ignored target group member addition error '
'"%s" while ensuring export.'),
exc)
else:
raise
self.targets[target_name] = []
return target_name
def _get_target_name(self, volume):
"""Return iSCSI target name with least LUs."""
provider_location = volume.get('provider_location')
target_names = self.targets.keys()
if provider_location:
target_name = provider_location.split(',1 ')[1].split(' ')[0]
if not(self.targets.get(target_name)):
self.targets[target_name] = []
if not(volume['name'] in self.targets[target_name]):
self.targets[target_name].append(volume['name'])
elif not(target_names):
# create first target and target group
target_name = self._create_target(0)
self.targets[target_name].append(volume['name'])
else:
target_name = target_names[0]
for target in target_names:
if len(self.targets[target]) < len(self.targets[target_name]):
target_name = target
if len(self.targets[target_name]) >= 20:
# create new target and target group
target_name = self._create_target(len(target_names))
if not(volume['name'] in self.targets[target_name]):
self.targets[target_name].append(volume['name'])
return target_name
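    # Illustrative sketch (not part of the driver; names are hypothetical) of the
    # balancing performed above:
    #
    #   self.targets = {'iqn:tgt-0': ['vol-a', 'vol-b'], 'iqn:tgt-1': ['vol-c']}
    #   self._get_target_name({'name': 'vol-d'})
    #   # -> 'iqn:tgt-1', the target with the fewest exported volumes; 'vol-d'
    #   #    is appended to its list. A new target and target group are only
    #   #    created once the least-loaded target already holds 20+ volumes.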
def _get_target_group_name(self, target_name):
"""Return Nexenta iSCSI target group name for volume."""
return target_name.replace(
self.configuration.nexenta_target_prefix,
self.configuration.nexenta_target_group_prefix
)
@staticmethod
def _get_clone_snapshot_name(volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
@staticmethod
def _is_clone_snapshot_name(snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
:return: model update dict for volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
six.text_type(self.configuration.nexenta_blocksize),
self.configuration.nexenta_sparse)
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
:param volume: volume reference
:param new_size: volume new size in GB
"""
LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
{'id': volume['id'], 'size': new_size})
self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
'volsize', '%sG' % new_size)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
volume_name = self._get_zvol_name(volume['name'])
try:
props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
self.nms.zvol.destroy(volume_name, '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Volume %s does not exist, it '
'seems it was already deleted.'), volume_name)
return
if 'zvol has children' in exc.args[0]:
self._mark_as_garbage(volume_name)
LOG.info(_LI('Volume %s will be deleted later.'), volume_name)
return
raise
origin = props.get('origin')
self._collect_garbage(origin)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: new volume reference
:param src_vref: source volume reference
"""
snapshot = {'volume_name': src_vref['name'],
'name': self._get_clone_snapshot_name(volume),
'volume_size': src_vref['size']}
LOG.debug('Creating temp snapshot of the original volume: '
'%(volume_name)s@%(name)s', snapshot)
        # We don't delete this snapshot, because it becomes the origin of the
        # new volume. NMS promotes it automatically when the user deletes the
        # origin volume. But when the cloned volume is deleted, we check its
        # origin property and delete the source snapshot if needed.
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
self._mark_as_garbage('@'.join(
(self._get_zvol_name(src_vref['name']), snapshot['name'])))
except exception.NexentaException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
'Volume creation failed, deleting created snapshot '
'%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (exception.NexentaException, exception.SnapshotIsBusy):
LOG.warning(_LW('Failed to delete zfs snapshot '
'%(volume_name)s@%(name)s'), snapshot)
raise
def _get_zfs_send_recv_cmd(self, src, dst):
"""Returns rrmgr command for source and destination."""
return utils.get_rrmgr_cmd(src, dst,
compression=self.rrmgr_compression,
tcp_buf_size=self.rrmgr_tcp_buf_size,
connections=self.rrmgr_connections)
@staticmethod
def get_nms_for_url(url):
"""Returns initialized nms object for url."""
auto, scheme, user, password, host, port, path = (
utils.parse_nms_url(url))
return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
password, auto=auto)
def migrate_volume(self, ctxt, volume, host):
"""Migrate if volume and host are managed by Nexenta appliance.
:param ctxt: context
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host})
false_ret = (False, None)
if volume['status'] not in ('available', 'retyping'):
return false_ret
if 'capabilities' not in host:
return false_ret
capabilities = host['capabilities']
if ('location_info' not in capabilities or
'iscsi_target_portal_port' not in capabilities or
'nms_url' not in capabilities):
return false_ret
nms_url = capabilities['nms_url']
dst_parts = capabilities['location_info'].split(':')
if (capabilities.get('vendor_name') != 'Nexenta' or
dst_parts[0] != self.__class__.__name__ or
capabilities['free_capacity_gb'] < volume['size']):
return false_ret
dst_host, dst_volume = dst_parts[1:]
ssh_bound = False
ssh_bindings = self.nms.appliance.ssh_list_bindings()
for bind in ssh_bindings:
if dst_host.startswith(ssh_bindings[bind][3]):
ssh_bound = True
break
if not ssh_bound:
LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
"SSH-bound."), dst_host)
# Create temporary snapshot of volume on NexentaStor Appliance.
snapshot = {
'volume_name': volume['name'],
'name': utils.get_migrate_snapshot_name(volume)
}
self.create_snapshot(snapshot)
src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
'volume': self.volume,
'zvol': volume['name'],
'snapshot': snapshot['name']
}
dst = ':'.join([dst_host, dst_volume])
try:
self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot send source snapshot %(src)s to "
"destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary source snapshot "
"%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete source volume %(volume)s on "
"NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self.get_nms_for_url(nms_url)
dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'],
snapshot['name'])
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary destination snapshot "
"%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
return True, None
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('Retype volume request %(vol)s to be %(type)s '
'(host: %(host)s), diff %(diff)s.',
{'vol': volume['name'],
'type': new_type,
'host': host,
'diff': diff})
options = dict(
compression='compression',
dedup='dedup',
description='nms:description'
)
retyped = False
migrated = False
capabilities = host['capabilities']
src_backend = self.__class__.__name__
dst_backend = capabilities['location_info'].split(':')[0]
if src_backend != dst_backend:
LOG.warning(_LW('Cannot retype from %(src_backend)s to '
'%(dst_backend)s.'),
{
'src_backend': src_backend,
'dst_backend': dst_backend,
})
return False
hosts = (volume['host'], host['host'])
old, new = hosts
if old != new:
migrated, provider_location = self.migrate_volume(
context, volume, host)
if not migrated:
nms = self.nms
else:
nms_url = capabilities['nms_url']
nms = self.get_nms_for_url(nms_url)
zvol = '%s/%s' % (
capabilities['location_info'].split(':')[-1], volume['name'])
for opt in options:
old, new = diff.get('extra_specs').get(opt, (False, False))
if old != new:
LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
{'opt': opt, 'old': old, 'new': new})
try:
nms.zvol.set_child_prop(
zvol, options[opt], new)
retyped = True
except exception.NexentaException:
LOG.error(_LE('Error trying to change %(opt)s'
' from %(old)s to %(new)s'),
{'opt': opt, 'old': old, 'new': new})
return False, None
return retyped or migrated, None
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
:param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
if (('size' in volume) and (
volume['size'] > snapshot['volume_size'])):
self.extend_volume(volume, volume['size'])
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
:param snapshot: snapshot reference
"""
volume_name = self._get_zvol_name(snapshot['volume_name'])
snapshot_name = '%s@%s' % (volume_name, snapshot['name'])
try:
self.nms.snapshot.destroy(snapshot_name, '')
except exception.NexentaException as exc:
if "does not exist" in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it seems it was '
'already deleted.'), snapshot_name)
return
elif "snapshot has dependent clones" in exc.args[0]:
self._mark_as_garbage(snapshot_name)
LOG.info(_LI('Snapshot %s has dependent clones, will be '
'deleted later.'), snapshot_name)
return
raise
self._collect_garbage(volume_name)
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
raise NotImplementedError
def _target_exists(self, target):
"""Check if iSCSI target exist.
:param target: target name
:return: True if target exist, else False
"""
targets = self.nms.stmf.list_targets()
if not targets:
return False
        return target in targets
def _target_group_exists(self, target_group):
"""Check if target group exist.
:param target_group: target group
:return: True if target group exist, else False
"""
groups = self.nms.stmf.list_targetgroups()
if not groups:
return False
return target_group in groups
def _target_member_in_target_group(self, target_group, target_member):
"""Check if target member in target group.
:param target_group: target group
:param target_member: target member
:return: True if target member in target group, else False
:raises: NexentaException if target group doesn't exist
"""
members = self.nms.stmf.list_targetgroup_members(target_group)
if not members:
return False
return target_member in members
def _lu_exists(self, zvol_name):
"""Check if LU exists on appliance.
:param zvol_name: Zvol name
:raises: NexentaException if zvol not exists
:return: True if LU exists, else False
"""
try:
return bool(self.nms.scsidisk.lu_exists(zvol_name))
except exception.NexentaException as exc:
if 'does not exist' not in exc.args[0]:
raise
return False
def _is_lu_shared(self, zvol_name):
"""Check if LU exists on appliance and shared.
:param zvol_name: Zvol name
:raises: NexentaException if Zvol not exist
:return: True if LU exists and shared, else False
"""
try:
shared = self.nms.scsidisk.lu_shared(zvol_name) > 0
except exception.NexentaException as exc:
if 'does not exist for zvol' not in exc.args[0]:
                raise  # unrelated error, re-raise
shared = False # LU does not exist
return shared
def create_export(self, _ctx, volume, connector):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
model_update = self._do_export(_ctx, volume)
return model_update
def ensure_export(self, _ctx, volume):
self._do_export(_ctx, volume)
def _do_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume)
target_group_name = self._get_target_group_name(target_name)
entry = None
if not self._lu_exists(zvol_name):
try:
entry = self.nms.scsidisk.create_lu(zvol_name, {})
except exception.NexentaException as exc:
if 'in use' not in exc.args[0]:
raise
LOG.info(_LI('Ignored LU creation error "%s" while ensuring '
'export.'), exc)
if not self._is_lu_shared(zvol_name):
try:
entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name})
except exception.NexentaException as exc:
if 'view entry exists' not in exc.args[0]:
raise
LOG.info(_LI('Ignored LUN mapping entry addition error "%s" '
'while ensuring export.'), exc)
model_update = {}
if entry:
provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
'host': self.nms_host,
'port': self.configuration.nexenta_iscsi_target_portal_port,
'name': target_name,
'lun': entry['lun'],
}
model_update = {'provider_location': provider_location}
return model_update
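    # Example (illustrative values): the provider_location built above follows
    # the iscsiadm layout '<nms host>:<portal port>,1 <target name> <lun>', e.g.
    #
    #   '10.0.0.5:3260,1 iqn.1986-03.com.sun:02:cinder-10.0.0.5-0 0'
    #
    # _get_target_name() later splits this string on ',1 ' to recover the target.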
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
target_name = self._get_target_name(volume)
self.targets[target_name].remove(volume['name'])
zvol_name = self._get_zvol_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info for NexentaStor appliance."""
LOG.debug('Updating volume stats')
stats = self.nms.volume.get_child_props(
self.configuration.nexenta_volume, 'health|size|used|available')
total_amount = utils.str2gib_size(stats['size'])
free_amount = utils.str2gib_size(stats['available'])
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.nms_host,
'volume': self.volume
}
self._stats = {
'vendor_name': 'Nexenta',
'dedup': self.volume_deduplication,
'compression': self.volume_compression,
'description': self.volume_description,
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': total_amount,
'free_capacity_gb': free_amount,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'volume_backend_name': self.backend_name,
'location_info': location_info,
'iscsi_target_portal_port': self.iscsi_target_portal_port,
'nms_url': self.nms.url
}
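    # Example (illustrative values): location_info is consumed by migrate_volume()
    # and retype(), which split it on ':' into (driver class, NMS host, volume),
    # e.g. 'NexentaISCSIDriver:10.3.1.1:cinder'.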
def _collect_garbage(self, zfs_object):
"""Destroys ZFS parent objects
Recursively destroys ZFS parent volumes and snapshots if they are
marked as garbage
:param zfs_object: full path to a volume or a snapshot
"""
if zfs_object and zfs_object in self._needless_objects:
sp = zfs_object.split('/')
path = '/'.join(sp[:-1])
name = sp[-1]
if '@' in name: # it's a snapshot:
volume, snap = name.split('@')
parent = '/'.join((path, volume))
try:
self.nms.snapshot.destroy(zfs_object, '')
except exception.NexentaException as exc:
LOG.debug('Error occurred while trying to delete a '
'snapshot: %s', exc)
return
else:
try:
props = self.nms.zvol.get_child_props(
zfs_object, 'origin') or {}
except exception.NexentaException:
props = {}
parent = (props['origin'] if 'origin' in props and
props['origin'] else '')
try:
self.nms.zvol.destroy(zfs_object, '')
except exception.NexentaException as exc:
LOG.debug('Error occurred while trying to delete a '
'volume: %s', exc)
return
self._needless_objects.remove(zfs_object)
self._collect_garbage(parent)
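    # Illustrative walk-through (hypothetical names):
    #
    #   _collect_garbage('pool/vol-src@cinder-clone-snapshot-1')
    #   # destroys that snapshot (if it was marked as garbage), then recurses
    #   # into its parent 'pool/vol-src'; the volume, and its own origin in
    #   # turn, are destroyed too, but only if they were previously registered
    #   # via _mark_as_garbage(). Clone chains are thus cleaned up bottom-up.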
def _mark_as_garbage(self, zfs_object):
"""Puts ZFS object into list for further removal
:param zfs_object: full path to a volume or a snapshot
"""
self._needless_objects.add(zfs_object)
|
|
import itertools, json, string, sys, math
from functools import wraps, partial
from contextlib import suppress
from collections import deque, defaultdict
import operator
from base64 import b16encode, b32encode, b64encode, a85encode
from generators import G, chain, window, first
eprint = partial(print, file=sys.stderr, flush=True)
standard_types = { bool, bytearray, bytes, complex, dict, float, int, list, set, str, tuple }
standard_defaults = [i() for i in standard_types]
printables = {k:v for k,v in enumerate(string.printable)}
small_int_cyclers = zip(*(itertools.cycle(range(1, i)) for i in range(2, 7)))
kinda_random_small_int = map(sum, small_int_cyclers)
kinda_random_medium_int = (a*b for a,b in zip(kinda_random_small_int, kinda_random_small_int))
kinda_random_big_int = (a*b for a,b in zip(kinda_random_medium_int, kinda_random_medium_int))
encoders = b16encode, b32encode, b64encode, b64encode, a85encode
str_encode_or_ignore = partial(str.encode, errors='ignore')
bytes_decode_or_ignore = partial(bytes.decode, errors='ignore')
def cached_uniq(pipe):
cache = defaultdict(partial(deque, maxlen=4))
for i in pipe:
if i not in cache[type(i)]:
cache[type(i)].append(i)
yield i
def uniq(pipe):
    # collapse adjacent duplicates: yield the first item, then every item that
    # differs (by type or value) from its immediate predecessor
    pipe = iter(pipe)
    try:
        prev = next(pipe)
    except StopIteration:
        return
    yield prev
    for i in pipe:
        if type(i) == type(prev) and i == prev:
            continue
        yield i
        prev = i
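# Illustrative examples (not part of the module): uniq() collapses *adjacent*
# duplicates of the same type, while cached_uniq() also drops values seen among
# the last few (up to 4) items of that type:
#
#   list(uniq(iter([1, 1, 2, 1, 'a', 'a'])))        # -> [1, 2, 1, 'a']
#   list(cached_uniq([1, 1, 2, 1, 'a', 'a']))       # -> [1, 2, 'a']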
def hashable(o):
try:
hash(o)
return True
except:
return False
def hashable_or_none(o):
'''returns an object if it is hashable or just None'''
try:
hash(o)
return o
except:
return None
def flipped(fn):
'''this decorator allows generators to yield their output and their flipped output'''
assert callable(fn), fn
def flip(o):
if isinstance(o, str):
return ''.join(reversed(o))
elif isinstance(o, bytes):
return bytes(bytearray(reversed(o)))
elif isinstance(o, (bytearray, list, set, tuple)):
return type(o)(reversed(o))
else:
raise Exception('this wasnt worth flipping: {}'.format(o))
@wraps(fn)
def wrapper(*a, **k):
for i in fn(*a, **k):
yield i
yield flip(i)
return wrapper
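# Illustrative example (not part of the module): a generator decorated with
# @flipped yields each value followed by its reversed counterpart.
#
#   @flipped
#   def samples():
#       yield 'abc'
#       yield [1, 2, 3]
#
#   list(samples())   # -> ['abc', 'cba', [1, 2, 3], [3, 2, 1]]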
def map_attempt(fn, iterable):
''' this works just like map but filters out crashes '''
assert callable(fn), fn
iterable = iter(iterable)
still_going = True
while still_going:
with suppress(Exception):
for i in iterable:
yield fn(i)
still_going = False
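# Illustrative example: map_attempt() behaves like map() but skips inputs whose
# mapping raises, instead of aborting the whole pipeline.
#
#   list(map_attempt(lambda x: 10 // x, [1, 2, 0, 5]))   # -> [10, 5, 2]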
def harvest_bool_from_bool(o):
assert type(o) is bool, o
yield not o
yield o
def harvest_bytearray_from_bool(o):
assert type(o) is bool, o
yield bytearray(o)
yield bytearray(not o)
def harvest_bytes_from_bool(o):
assert type(o) is bool, o
yield bytes(o)
yield bytes(not o)
def harvest_complex_from_bool(o):
assert type(o) is bool, o
yield complex(o)
yield complex(not o)
def harvest_dict_from_bool(o):
assert type(o) is bool, o
global standard_defaults
for i in map(hashable_or_none, standard_defaults):
yield {o:i}
yield {i:o}
yield {i:o, o:i}
def harvest_float_from_bool(o):
assert type(o) is bool, o
yield float(o)
yield float(not o)
def harvest_int_from_bool(o):
assert type(o) is bool, o
yield int(o)
yield int(not o)
def harvest_list_from_bool(o):
assert type(o) is bool, o
for i in range(1, 8):
yield [o] * i
def harvest_set_from_bool(o):
assert type(o) is bool, o
yield {o}
yield {not o}
yield {o, not o}
def harvest_str_from_bool(o):
assert type(o) is bool, o
yield json.dumps(o)
yield repr(o)
int_o = int(o)
yield str(int_o)
yield bin(int_o)
yield bytes(int_o).decode()
def harvest_tuple_from_bool(o):
assert type(o) is bool, o
yield from map(tuple, harvest_list_from_bool(o))
def harvest_bool_from_bytearray(o):
assert type(o) is bytearray, o
yield from harvest_bool_from_bool(bool(o))
for i in harvest_list_from_bytearray(o):
if isinstance(i, int):
yield from harvest_bool_from_int(i)
def harvest_bytearray_from_bytearray(o):
assert type(o) is bytearray, o
for i in range(1, 9):
tmp = o * i
yield tmp
tmp.reverse()
yield tmp
def harvest_bytes_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(bytes, harvest_bytearray_from_bytearray(o))
def harvest_complex_from_bytearray(o):
assert type(o) is bytearray, o
yield complex(len(o), len(o))
yield from G(harvest_bytearray_from_bytearray(o)
).chain(
).window(2
).map(lambda i:[complex(i[0]), complex(i[1]), complex(*i)]
).chain()
def harvest_dict_from_bytearray(o):
assert type(o) is bytearray, o
yield from harvest_dict_from_list(list(o))
yield from harvest_dict_from_list(list(map(chr, o)))
def harvest_float_from_bytearray(o):
assert type(o) is bytearray, o
yield from harvest_float_from_float(float(len(o) * len(o)))
if o:
for i in harvest_bytearray_from_bytearray(o):
            yield float.fromhex(i.hex())
            for ii in i:
                yield from harvest_float_from_int(ii)
def harvest_int_from_bytearray(o):
assert type(o) is bytearray, o
for i in [o, o.upper(), o.lower()]:
        yield int.from_bytes(i, 'little')
        yield int.from_bytes(i, 'big')
@flipped
def harvest_list_from_bytearray(o):
assert type(o) is bytearray, o
for x in range(-1, 2):
yield [i+x for i in o]
yield [(i+x)%2 for i in o]
yield [i+x for i in o if i%2]
yield [i+x for i in o if not i%2]
def harvest_set_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(set, harvest_list_from_bytearray(o))
def harvest_str_from_bytearray(o):
assert type(o) is bytearray, o
for l in harvest_list_from_bytearray(o):
with suppress(Exception):
yield ''.join(map(chr, l))
def harvest_tuple_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(tuple, harvest_list_from_bytearray(o))
def harvest_bool_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_bool_from_int(len(o))
for i in o:
yield from (x=='1' for x in bin(i)[2:])
def harvest_bytearray_from_bytes(o):
assert type(o) is bytes, o
yield from map(bytearray, harvest_bytes_from_bytes(o))
def harvest_bytes_from_bytes(o):
assert type(o) is bytes, o
yield bytes(o)
byte_pipe = lambda:map(lambda i:i%256, harvest_int_from_bytes(o))
yield from map(bytes, byte_pipe())
for ints in window(byte_pipe(), 8):
for i in range(1, 8):
yield bytes(ints[:i])
yield bytes(ints[:i]) * i
def harvest_complex_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_complex_from_int(len(o))
for a, b in window(harvest_int_from_bytes(o), 2):
yield complex(a, b)
def harvest_dict_from_bytes(o):
assert type(o) is bytes, o
for l in harvest_list_from_bytes(o):
yield from harvest_dict_from_list(l)
def harvest_float_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_float_from_list(list(o))
for a, b in window(harvest_int_from_bytes(o), 2):
yield float(a * b)
def harvest_int_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_int_from_list(list(o))
for i in o:
yield from harvest_int_from_int(i)
@flipped
def harvest_list_from_bytes(o):
assert type(o) is bytes, o
yield [i for i in o]
yield [bool(i) for i in o]
yield [str(i) for i in o]
yield [float(i) for i in o]
def harvest_set_from_bytes(o):
assert type(o) is bytes, o
yield from map(set, harvest_list_from_bytes(o))
def harvest_str_from_bytes(o):
assert type(o) is bytes, o
for b in harvest_bytes_from_bytes(o):
yield bytes_decode_or_ignore(b)
def harvest_tuple_from_bytes(o):
assert type(o) is bytes, o
yield from map(tuple, harvest_list_from_bytes(o))
def harvest_bool_from_complex(o):
assert type(o) is complex, o
yield from harvest_bool_from_float(o.imag)
yield from harvest_bool_from_float(o.real)
def harvest_bytearray_from_complex(o):
assert type(o) is complex, o
yield from harvest_bytearray_from_float(o.imag)
yield from harvest_bytearray_from_float(o.real)
def harvest_bytes_from_complex(o):
assert type(o) is complex, o
yield from harvest_bytes_from_float(o.imag)
yield from harvest_bytes_from_float(o.real)
def harvest_complex_from_complex(o):
assert type(o) is complex, o
for a, b in window(harvest_int_from_float(o.imag), 2):
yield complex(a, b)
for a, b in window(harvest_int_from_float(o.real), 2):
yield complex(a, b)
def harvest_dict_from_complex(o):
assert type(o) is complex, o
yield from harvest_dict_from_float(o.imag)
yield from harvest_dict_from_float(o.real)
def harvest_float_from_complex(o):
assert type(o) is complex, o
yield from harvest_float_from_float(o.imag)
yield from harvest_float_from_float(o.real)
def harvest_int_from_complex(o):
assert type(o) is complex, o
yield from harvest_int_from_float(o.imag)
yield from harvest_int_from_float(o.real)
def harvest_list_from_complex(o):
assert type(o) is complex, o
yield from harvest_list_from_float(o.imag)
yield from harvest_list_from_float(o.real)
def harvest_set_from_complex(o):
assert type(o) is complex, o
yield from harvest_set_from_float(o.imag)
yield from harvest_set_from_float(o.real)
def harvest_str_from_complex(o):
assert type(o) is complex, o
yield from harvest_str_from_float(o.imag)
yield from harvest_str_from_float(o.real)
def harvest_tuple_from_complex(o):
assert type(o) is complex, o
yield from map(tuple, harvest_list_from_complex(o))
def remutate_dict(o, output_type):
assert type(o) is dict, o
assert output_type in standard_types
if not o:
yield output_type()
for k, v in o.items():
if type(k) in standard_types:
yield from mutate(k, output_type)
if not isinstance(v, dict) and type(v) in standard_types: # prevent infinite mutations
yield from mutate(v, output_type)
def harvest_bool_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bool)
def harvest_bytearray_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bytearray)
def harvest_bytes_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bytes)
def harvest_complex_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, complex)
def harvest_dict_from_dict(o):
assert type(o) is dict, o
for key_subset in harvest_list_from_list(list(o.keys())):
yield {k:o[k] for k in key_subset}
def harvest_float_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, float)
def harvest_int_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, int)
@flipped
def harvest_list_from_dict(o):
assert type(o) is dict, o
yield list(o.keys())
yield list(o.values())
def harvest_tuple_from_dict(o):
assert type(o) is dict, o
yield from map(tuple, harvest_list_from_dict(o))
def harvest_set_from_dict(o):
assert type(o) is dict, o
yield set(o.keys())
yield from harvest_set_from_list(list(o.values()))
def harvest_str_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, str)
def harvest_bool_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bool_from_int(i)
def harvest_bytearray_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bytearray_from_int(i)
def harvest_bytes_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bytes_from_int(i)
def harvest_complex_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_complex_from_int(i)
def harvest_dict_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_dict_from_int(i)
def harvest_float_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield o * i
yield o + i
yield o - i
yield i - o
def harvest_int_from_float(o):
assert type(o) is float, o
try:
o = o.as_integer_ratio()
        yield from chain(*map(harvest_int_from_int, o))
except (ValueError, OverflowError) as e:
yield from harvest_int_from_int(1)
def harvest_list_from_float(o):
assert type(o) is float, o
try:
a, b = o.as_integer_ratio()
except (ValueError, OverflowError) as e:
a, b = 1, 2
    aa = min(512, abs(a))
    bb = min(512, abs(b))
yield from harvest_list_from_list([o])
try:
yield [o] * aa
        yield [o] * bb
yield [a] * bb
yield [b] * aa
yield [([o] * aa)] * bb
yield [([o] * bb)] * aa
yield [([o*a] * aa)] * bb
yield [([o*a] * bb)] * aa
yield [([o*b] * aa)] * bb
yield [([o*b] * bb)] * aa
except MemoryError:
pass
def harvest_set_from_float(o):
assert type(o) is float, o
for l in harvest_list_from_float(o):
yield from harvest_set_from_list(l)
def harvest_str_from_float(o):
assert type(o) is float, o
yield str(o)
yield repr(o)
yield from map(chr, map(lambda i:i%1114112, harvest_int_from_float(o)))
def harvest_tuple_from_float(o):
assert type(o) is float, o
yield from map(tuple, harvest_list_from_float(o))
def harvest_bool_from_int(o):
assert type(o) is int, o
yield o % 2 == 1
yield o % 2 == 0
yield from (x=='1' for x in bin(o))
yield from (x=='1' for x in bin(o**2))
def harvest_bytearray_from_int(o):
assert type(o) is int, o
yield from map(bytearray, harvest_bytes_from_int(o))
def harvest_bytes_from_int(o):
assert type(o) is int, o
for ints in window(map(lambda i:i%256, harvest_int_from_int(o)), 8):
yield from (bytes(ints[:i]) for i in range(1, 8))
def harvest_complex_from_int(o):
assert type(o) is int, o
for a, b in window(harvest_int_from_int(o), 2):
yield complex(a, b)
def harvest_dict_from_int(o):
assert type(o) is int, o
for k, v in zip(harvest_str_from_int(o), harvest_int_from_int(o)):
yield {k:v for _,k,v in zip(range(min(16, max(1, v))), harvest_str_from_str(k), harvest_int_from_int(v))}
def harvest_float_from_int(o):
assert type(o) is int, o
for a, b in window(harvest_int_from_int(o), 2):
if a != 0:
yield b / a
if b != 0:
yield a / b
yield float(a * b)
def harvest_int_from_int(o):
assert type(o) is int, o
yield from (o+x for x in range(-10, 11))
yield from (o//x for x in range(-10, -1))
yield from (o//x for x in range(1, 11))
yield from (int(o*x) for x in range(-10, 11))
yield from (o%x for x in range(-10, -1))
yield from (o%x for x in range(1, 11))
@flipped
def harvest_list_from_int(o):
assert type(o) is int, o
bin_o = bin(o)[2:]
yield list(bin_o)
as_bools = [i=='1' for i in bin_o]
for i in range(1, len(as_bools)):
yield as_bools[:i]
yield as_bools[i:]
yield [(not x) for x in as_bools[:i]]
yield [(not x) for x in as_bools[i:]]
def harvest_set_from_int(o):
assert type(o) is int, o
yield from map(set, harvest_list_from_int(o))
def harvest_str_from_int(o):
assert type(o) is int, o
yield bin(o)
yield json.dumps(o)
chars = filter(bool, map_attempt(lambda i:(printables[i%len(printables)]), harvest_int_from_int(o)))
for l in kinda_random_small_int:
out = ''.join(c for _,c in zip(range(l), chars))
if out:
yield out
else:
break
def harvest_tuple_from_int(o):
assert type(o) is int, o
for i in harvest_list_from_int(o):
yield tuple(i)
yield tuple(set(i))
def harvest_bool_from_list(o):
assert type(o) is list, o
yield bool(o)
len_o = len(o)
for i in range(2,10):
yield bool(len_o % i)
as_bools = list(map(bool, o))
yield from as_bools
for i in as_bools:
yield not i
def harvest_bytearray_from_list(o):
assert type(o) is list, o
yield from map(bytearray, harvest_bytes_from_list(o))
def harvest_bytes_from_list(o):
assert type(o) is list, o
yield from map_attempt(str_encode_or_ignore, harvest_str_from_list(o))
def harvest_complex_from_list(o):
assert type(o) is list, o
    for a, b in window(harvest_int_from_list(o), 2):
yield complex(a, b)
def harvest_dict_from_list(o):
assert type(o) is list, o
len_o = len(o)
yield {len_o: None}
yield {None: len_o}
yield {'data': o}
yield {'result': o}
o = itertools.cycle(o)
for i in range(1, int(len_o*2)):
with suppress(Exception):
yield {next(o):next(o) for _ in range(i)}
def harvest_float_from_list(o):
assert type(o) is list, o
yield float(len(o))
pipe = iter(harvest_int_from_list(o))
for a, b in zip(pipe, pipe):
yield float(a * b)
if b and a:
yield a/b
yield b/a
def harvest_int_from_list(o):
assert type(o) is list, o
yield from harvest_int_from_int(len(o))
for fn in [len, int, ord]:
yield from map_attempt(fn, o)
yield from str_encode_or_ignore(repr(o))
@flipped
def harvest_list_from_list(o):
assert type(o) is list, o
yield o
if o:
for i in range(1, int(math.sqrt(len(o)))+1):
yield [v for ii,v in enumerate(o) if not ii%i]
yield [v for ii,v in enumerate(o) if ii%i]
yield [i for i in o if i]
yield [i for i in o if not i]
def harvest_set_from_list(o):
assert type(o) is list, o
for l in harvest_list_from_list(o):
s = set(map(hashable_or_none, l))
yield {i for i in s if i is not None}
yield {i for i in s if i}
yield s
def harvest_str_from_list(o):
assert type(o) is list, o
yield repr(o)
for i in o:
with suppress(Exception):
yield i.decode() if isinstance(i, bytes) else str(i)
yield from map(repr, o)
for i in o:
with suppress(Exception):
as_bytes = bytes(i) if isinstance(i, int) else bytes(str(i), encoding='utf-8')
for encoder in encoders:
yield encoder(as_bytes).decode()
for i in o:
with suppress(Exception):
yield json.dumps(i)
def harvest_tuple_from_list(o):
assert type(o) is list, o
yield from map(tuple, harvest_list_from_list(o))
yield from map(tuple, harvest_set_from_list(o))
def harvest_bool_from_set(o):
assert type(o) is set, o
yield from harvest_bool_from_list(list(o))
def harvest_bytearray_from_set(o):
assert type(o) is set, o
yield from harvest_bytearray_from_list(list(o))
def harvest_bytes_from_set(o):
assert type(o) is set, o
yield from harvest_bytes_from_list(list(o))
def harvest_complex_from_set(o):
assert type(o) is set, o
yield from harvest_complex_from_list(list(o))
def harvest_dict_from_set(o):
assert type(o) is set, o
yield from harvest_dict_from_list(list(o))
def harvest_float_from_set(o):
assert type(o) is set, o
yield from harvest_float_from_list(list(o))
def harvest_int_from_set(o):
assert type(o) is set, o
yield from harvest_int_from_list(list(o))
@flipped
def harvest_list_from_set(o):
assert type(o) is set, o
yield from harvest_list_from_list(list(o))
def harvest_set_from_set(o):
assert type(o) is set, o
yield from harvest_set_from_list(list(o))
def harvest_str_from_set(o):
assert type(o) is set, o
yield from harvest_str_from_list(list(o))
def harvest_tuple_from_set(o):
assert type(o) is set, o
yield from map(tuple, harvest_list_from_set(o))
def harvest_bool_from_str(o):
assert type(o) is str, o
yield from harvest_bool_from_list(list(o))
yield from (bool(ord(ch)%2) for ch in o)
def harvest_bytearray_from_str(o):
assert type(o) is str, o
yield from map(bytearray, harvest_bytes_from_str(o))
def harvest_bytes_from_str(o):
assert type(o) is str, o
yield from map(str.encode, harvest_str_from_str(o))
def harvest_complex_from_str(o):
assert type(o) is str, o
yield from harvest_complex_from_list(list(o))
for a, b in window(harvest_int_from_str(o), 2):
yield complex(a, b)
def harvest_dict_from_str(o):
assert type(o) is str, o
yield {o: None}
yield {None: o}
yield {o: o}
yield {o: {o: None}}
yield {o: {o: o}}
yield from harvest_dict_from_dict({a:b for a,b in zip(*([iter(o)]*2))})
def harvest_float_from_str(o):
assert type(o) is str, o
yield from harvest_float_from_float(float(len(o)))
for a, b in window(filter(bool, map(ord, o)), 2):
        yield float(a * b)
yield a / b
yield b / a
def harvest_int_from_str(o):
assert type(o) is str, o
yield from harvest_int_from_int(len(o))
yield from map(ord, o)
@flipped
def harvest_list_from_str(o):
assert type(o) is str, o
yield from harvest_list_from_list(list(o))
yield from harvest_list_from_list(list(map(ord, o)))
def harvest_set_from_str(o):
assert type(o) is str, o
for l in harvest_list_from_str(o):
yield from harvest_set_from_list(l)
def harvest_str_from_str(o):
assert type(o) is str, o
yield o.upper()
yield o.lower()
yield o.strip()
common_chars = ['\n', '"', "'", ' ', '\t', '.', ',', ':']
yield from (o.replace(old_char, new_char) for old_char, new_char in itertools.combinations(common_chars, 2) if old_char in o)
yield ''.join(x for x in o if x.isnumeric())
yield ''.join(x for x in o if not x.isnumeric())
def harvest_tuple_from_str(o):
assert type(o) is str, o
yield from map(tuple, harvest_list_from_str(o))
def harvest_bool_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bool_from_bool(bool(o))
yield from map(bool, o)
def harvest_bytearray_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bytearray_from_list(list(o))
def harvest_bytes_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bytes_from_list(list(o))
def harvest_complex_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_complex_from_list(list(o))
def harvest_dict_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_dict_from_list(list(o))
def harvest_float_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_float_from_list(list(o))
def harvest_int_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_int_from_list(list(o))
@flipped
def harvest_list_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_list_from_list(list(o))
def harvest_set_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_set_from_list(list(o))
def harvest_str_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_str_from_list(list(o))
def harvest_tuple_from_tuple(o):
assert type(o) is tuple, o
yield from map(tuple, harvest_list_from_tuple(o))
mutation_map = {
(bool, bool): harvest_bool_from_bool,
(bool, bytearray): harvest_bytearray_from_bool,
(bool, bytes): harvest_bytes_from_bool,
(bool, complex): harvest_complex_from_bool,
(bool, dict): harvest_dict_from_bool,
(bool, float): harvest_float_from_bool,
(bool, int): harvest_int_from_bool,
(bool, list): harvest_list_from_bool,
(bool, set): harvest_set_from_bool,
(bool, str): harvest_str_from_bool,
(bool, tuple): harvest_tuple_from_bool,
(bytearray, bool): harvest_bool_from_bytearray,
(bytearray, bytearray): harvest_bytearray_from_bytearray,
(bytearray, bytes): harvest_bytes_from_bytearray,
(bytearray, complex): harvest_complex_from_bytearray,
(bytearray, dict): harvest_dict_from_bytearray,
(bytearray, float): harvest_float_from_bytearray,
(bytearray, int): harvest_int_from_bytearray,
(bytearray, list): harvest_list_from_bytearray,
(bytearray, set): harvest_set_from_bytearray,
(bytearray, str): harvest_str_from_bytearray,
(bytearray, tuple): harvest_tuple_from_bytearray,
(bytes, bool): harvest_bool_from_bytes,
(bytes, bytearray): harvest_bytearray_from_bytes,
(bytes, bytes): harvest_bytes_from_bytes,
(bytes, complex): harvest_complex_from_bytes,
(bytes, dict): harvest_dict_from_bytes,
(bytes, float): harvest_float_from_bytes,
(bytes, int): harvest_int_from_bytes,
(bytes, list): harvest_list_from_bytes,
(bytes, set): harvest_set_from_bytes,
(bytes, str): harvest_str_from_bytes,
(bytes, tuple): harvest_tuple_from_bytes,
(complex, bool): harvest_bool_from_complex,
(complex, bytearray): harvest_bytearray_from_complex,
(complex, bytes): harvest_bytes_from_complex,
(complex, complex): harvest_complex_from_complex,
(complex, dict): harvest_dict_from_complex,
(complex, float): harvest_float_from_complex,
(complex, int): harvest_int_from_complex,
(complex, list): harvest_list_from_complex,
(complex, set): harvest_set_from_complex,
(complex, str): harvest_str_from_complex,
(complex, tuple): harvest_tuple_from_complex,
(dict, bool): harvest_bool_from_dict,
(dict, bytearray): harvest_bytearray_from_dict,
(dict, bytes): harvest_bytes_from_dict,
(dict, complex): harvest_complex_from_dict,
(dict, dict): harvest_dict_from_dict,
(dict, float): harvest_float_from_dict,
(dict, int): harvest_int_from_dict,
(dict, list): harvest_list_from_dict,
(dict, set): harvest_set_from_dict,
(dict, str): harvest_str_from_dict,
(dict, tuple): harvest_tuple_from_dict,
(float, bool): harvest_bool_from_float,
(float, bytearray): harvest_bytearray_from_float,
(float, bytes): harvest_bytes_from_float,
(float, complex): harvest_complex_from_float,
(float, dict): harvest_dict_from_float,
(float, float): harvest_float_from_float,
(float, int): harvest_int_from_float,
(float, list): harvest_list_from_float,
(float, set): harvest_set_from_float,
(float, str): harvest_str_from_float,
(float, tuple): harvest_tuple_from_float,
(int, bool): harvest_bool_from_int,
(int, bytearray): harvest_bytearray_from_int,
(int, bytes): harvest_bytes_from_int,
(int, complex): harvest_complex_from_int,
(int, dict): harvest_dict_from_int,
(int, float): harvest_float_from_int,
(int, int): harvest_int_from_int,
(int, list): harvest_list_from_int,
(int, set): harvest_set_from_int,
(int, str): harvest_str_from_int,
(int, tuple): harvest_tuple_from_int,
(list, bool): harvest_bool_from_list,
(list, bytearray): harvest_bytearray_from_list,
(list, bytes): harvest_bytes_from_list,
(list, complex): harvest_complex_from_list,
(list, dict): harvest_dict_from_list,
(list, float): harvest_float_from_list,
(list, int): harvest_int_from_list,
(list, list): harvest_list_from_list,
(list, set): harvest_set_from_list,
(list, str): harvest_str_from_list,
(list, tuple): harvest_tuple_from_list,
(set, bool): harvest_bool_from_set,
(set, bytearray): harvest_bytearray_from_set,
(set, bytes): harvest_bytes_from_set,
(set, complex): harvest_complex_from_set,
(set, dict): harvest_dict_from_set,
(set, float): harvest_float_from_set,
(set, int): harvest_int_from_set,
(set, list): harvest_list_from_set,
(set, set): harvest_set_from_set,
(set, str): harvest_str_from_set,
(set, tuple): harvest_tuple_from_set,
(str, bool): harvest_bool_from_str,
(str, bytearray): harvest_bytearray_from_str,
(str, bytes): harvest_bytes_from_str,
(str, complex): harvest_complex_from_str,
(str, dict): harvest_dict_from_str,
(str, float): harvest_float_from_str,
(str, int): harvest_int_from_str,
(str, list): harvest_list_from_str,
(str, set): harvest_set_from_str,
(str, str): harvest_str_from_str,
(str, tuple): harvest_tuple_from_str,
(tuple, bool): harvest_bool_from_tuple,
(tuple, bytearray): harvest_bytearray_from_tuple,
(tuple, bytes): harvest_bytes_from_tuple,
(tuple, complex): harvest_complex_from_tuple,
(tuple, dict): harvest_dict_from_tuple,
(tuple, float): harvest_float_from_tuple,
(tuple, int): harvest_int_from_tuple,
(tuple, list): harvest_list_from_tuple,
(tuple, set): harvest_set_from_tuple,
(tuple, str): harvest_str_from_tuple,
(tuple, tuple): harvest_tuple_from_tuple
}
for type_combo in itertools.product(standard_types, repeat=2):
assert type_combo in mutation_map, type_combo
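# Illustrative sketch (not part of the original module): every (input_type,
# output_type) pair over `standard_types` maps to exactly one harvester above,
# so a single generator can be looked up directly, e.g.:
#
#     harvester = mutation_map[(str, int)]   # -> harvest_int_from_str
#     some_ints = [i for _, i in zip(range(5), harvester('abc'))]
#
# The exact values produced depend on the helpers defined earlier in this
# module, so treat this only as a usage hint.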
def mutate(o, output_type):
    ''' This function takes an input object and runs mutations on it to harvest
    inputs of the specified output type. This allows battle_tested to create
    more test inputs without relying purely on random generation. '''
global mutation_map
assert isinstance(mutation_map, dict), mutation_map
assert all(type(k) is tuple for k in mutation_map), mutation_map
    assert all(len(k) == 2 for k in mutation_map), mutation_map
assert all(all(type(t)==type for t in k) for k in mutation_map), mutation_map
assert o is not type, o
assert output_type in standard_types, output_type
if o is None:
o = False
def mutator():
if isinstance(o, output_type):
for i in mutation_map[type(o), output_type](o):
yield i
else:
for i in mutation_map[type(o), output_type](o):
yield i
yield from mutation_map[type(i), output_type](i)
return cached_uniq(mutator())
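# Usage sketch (illustrative only, assuming the helpers above are available):
# `mutate` lazily derives values of `output_type` from an arbitrary seed
# object, e.g. harvesting a few ints out of a string:
#
#     from itertools import islice
#     seeds = list(islice(mutate('abc', int), 10))
#     assert all(type(s) is int for s in seeds)
#
# Because the result is a deduplicating generator, callers normally cap it
# with `islice`/`first` rather than exhausting it.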
def warn_about_duplicates(pipe):
last = None
count = 0
current_dup = None
for a, b in window(pipe, 2):
if a == b and type(a) == type(b):
current_dup = a
count += 1
elif count > 0:
eprint('WARNING: found', count, 'duplicates of', repr(current_dup))
count = 0
yield a
last = b
yield last
def test_all_mutations():
tests = len(standard_types) * len(standard_defaults)
done = 0
count = 0
for start_variable in standard_defaults:
for output_type in standard_types:
ran = False
done += 1
eprint(done, '/', tests, 'testing harvest_{}_from_{}'.format(output_type.__name__, type(start_variable).__name__))
for v in first(mutate(start_variable, output_type), 10000000):
ran = True
assert type(v) is output_type, v
count += 1
assert ran, locals()
eprint('success: created', count, 'inputs')
if __name__ == '__main__':
for _ in range(1):
for c,i in enumerate(harvest_complex_from_bytearray(bytearray(b'hi'))):
continue #print('-', i, type(i))
for i in mutate({'name':'billy'}, int):
continue #print(i)
#print(c)
for test in "hello world why don't we get some waffles or something? 7777".split(' '):
for _type in (str, dict, list, bool, int, float):
for i,v in enumerate(warn_about_duplicates(mutate(test, _type))):
continue #print(repr(v))
#print(i)
test_all_mutations()
|
|
from wagtail.wagtailcore import blocks
from wagtail.wagtailimages import blocks as images_blocks
from wagtail.wagtailsnippets.blocks import SnippetChooserBlock
from . import atoms, molecules
from ..util import ref
from ..models.snippets import Contact as ContactSnippetClass
class Well(blocks.StructBlock):
content = blocks.RichTextBlock(required=False, label='Well')
class Meta:
icon = 'title'
template = '_includes/organisms/well.html'
classname = 'block__flush'
class ImageText5050Group(blocks.StructBlock):
heading = blocks.CharBlock(icon='title', required=False)
image_texts = blocks.ListBlock(molecules.ImageText5050())
class Meta:
icon = 'image'
template = '_includes/organisms/image-text-50-50-group.html'
class ImageText2575Group(blocks.StructBlock):
heading = blocks.CharBlock(icon='title', required=False)
image_texts = blocks.ListBlock(molecules.ImageText2575())
class Meta:
icon = 'image'
template = '_includes/organisms/image-text-25-75-group.html'
class HalfWidthLinkBlobGroup(blocks.StructBlock):
heading = blocks.CharBlock(icon='title', required=False)
link_blobs = blocks.ListBlock(molecules.HalfWidthLinkBlob())
class Meta:
icon = 'link'
template = '_includes/organisms/half-width-link-blob-group.html'
class PostPreview(blocks.StructBlock):
heading = blocks.CharBlock(required=False)
body = blocks.RichTextBlock(required=False)
image = atoms.ImageBasic(required=False)
post = blocks.PageChooserBlock(required=False)
link = atoms.Hyperlink(required=False)
class Meta:
icon = 'view'
template = '_includes/organisms/post-preview.html'
class PostPreviewSnapshot(blocks.StructBlock):
limit = blocks.CharBlock(default='3', label='Limit',
help_text='How many posts do you want to show?')
post_date_description = blocks.CharBlock(default='Published')
class Meta:
icon = 'order'
class EmailSignUp(blocks.StructBlock):
heading = blocks.CharBlock(required=False)
text = blocks.CharBlock(required=False)
gd_code = blocks.CharBlock(required=False)
form_field = blocks.ListBlock(molecules.FormFieldWithButton(), icon='mail', required=False)
class Meta:
icon = 'mail'
template = '_includes/organisms/email-signup.html'
class RegComment(blocks.StructBlock):
document_id = blocks.CharBlock(required=True, label='Document ID',
help_text='Federal Register document ID number to which the comment should be submitted. Should follow this format: CFPB-YYYY-####-####')
generic_regs_link = blocks.BooleanBlock(required=False, default=True,
label='Use generic Regs.gov link?',
        help_text='If unchecked, the "comment at Regulations.gov" link (used when you want to add attachments) will point directly to the document given above. Leave this checked if this comment form is being published before the full document is live at Regulations.gov, then uncheck it once the full document has been published.')
id = blocks.CharBlock(required=False, label='Form ID',
help_text='Sets the `id` attribute in the form\'s markup. If not set, the form will be assigned a base id of `o-reg-comment_` with a random number appended.')
class Meta:
icon = 'form'
template = '_includes/organisms/reg-comment.html'
class RelatedPosts(blocks.StructBlock):
limit = blocks.CharBlock(default='3', label='Limit')
show_heading = blocks.BooleanBlock(required=False, default=True,
label='Show Heading and Icon?',
help_text='This toggles the heading and'
+ ' icon for the related types.')
header_title = blocks.CharBlock(default='Further reading', label='Slug Title')
relate_posts = blocks.BooleanBlock(required=False, default=True,
label='Blog Posts', editable=False)
relate_newsroom = blocks.BooleanBlock(required=False, default=True,
label='Newsroom', editable=False)
relate_events = blocks.BooleanBlock(required=False, default=True,
label='Events')
specific_categories = blocks.ListBlock(blocks.ChoiceBlock(choices=ref.related_posts_categories, required=False), required=False)
class Meta:
icon = 'link'
template = '_includes/molecules/related-posts.html'
class MainContactInfo(blocks.StructBlock):
header = blocks.CharBlock(required=False)
body = blocks.RichTextBlock(required=False)
contact = SnippetChooserBlock(ContactSnippetClass)
class Meta:
icon = 'wagtail'
template = '_includes/organisms/main-contact-info.html'
class SidebarContactInfo(blocks.StructBlock):
header = blocks.CharBlock(required=False)
body = blocks.RichTextBlock(required=False)
contact = SnippetChooserBlock(ContactSnippetClass)
class Meta:
icon = 'wagtail'
template = '_includes/organisms/sidebar-contact-info.html'
class Table(blocks.StructBlock):
headers = blocks.ListBlock(blocks.CharBlock())
rows = blocks.ListBlock(blocks.StreamBlock([
('hyperlink', atoms.Hyperlink(required=False)),
('text', blocks.CharBlock()),
('text_blob', blocks.TextBlock()),
('rich_text_blob', blocks.RichTextBlock()),
]))
class Meta:
icon = 'form'
template = '_includes/organisms/table.html'
label = 'Table'
class FullWidthText(blocks.StreamBlock):
content = blocks.RichTextBlock(icon='edit')
media = images_blocks.ImageChooserBlock(icon='image')
quote = molecules.Quote()
cta = molecules.CallToAction()
related_links = molecules.RelatedLinks()
table = Table()
class Meta:
icon = 'edit'
template = '_includes/organisms/full-width-text.html'
class ExpandableGroup(blocks.StructBlock):
heading = blocks.CharBlock(required=False)
body = blocks.RichTextBlock(required=False)
is_accordion = blocks.BooleanBlock(required=False)
has_rule = blocks.BooleanBlock(required=False)
expandables = blocks.ListBlock(molecules.Expandable())
class Meta:
icon = 'list-ul'
template = '_includes/organisms/expandable-group.html'
class Media:
js = ["expandable-group.js"]
class ItemIntroduction(blocks.StructBlock):
category = blocks.ChoiceBlock(choices=ref.categories, required=False)
heading = blocks.CharBlock(required=False)
paragraph = blocks.RichTextBlock(required=False)
date = blocks.DateTimeBlock(required=False)
has_social = blocks.BooleanBlock(required=False, help_text="Whether to show the share icons or not.")
class Meta:
icon = 'form'
template = '_includes/organisms/item-introduction.html'
classname = 'block__flush-top'
class FilterControls(molecules.BaseExpandable):
form_type = blocks.ChoiceBlock(choices=[
('filterable-list', 'Filterable List'),
('pdf-generator', 'PDF Generator'),
], default='filterable-list')
title = blocks.BooleanBlock(default=True, required=False,
label='Filter Title')
post_date_description = blocks.CharBlock(default='Published')
categories = blocks.StructBlock([
('filter_category', blocks.BooleanBlock(default=True, required=False)),
('show_preview_categories', blocks.BooleanBlock(default=True, required=False)),
('page_type', blocks.ChoiceBlock(choices=ref.page_types,
required=False)),
])
topics = blocks.BooleanBlock(default=True, required=False,
label='Filter Topics')
authors = blocks.BooleanBlock(default=True, required=False,
label='Filter Authors')
date_range = blocks.BooleanBlock(default=True, required=False,
label='Filter Date Range')
class Meta:
label = 'Filter Controls'
icon = 'form'
class Media:
js = ['notification.js', 'expandable.js', 'filterable-list-controls.js']
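# Usage note (illustrative, not part of this module): these organisms are
# StreamField block types, so a page model elsewhere in the project would
# typically reference them roughly like this (the page and field names below
# are hypothetical):
#
#     from wagtail.wagtailcore.fields import StreamField
#     from wagtail.wagtailcore.models import Page
#     from . import organisms
#
#     class ExamplePage(Page):
#         content = StreamField([
#             ('well', organisms.Well()),
#             ('full_width_text', organisms.FullWidthText()),
#             ('table', organisms.Table()),
#         ], blank=True)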
|
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for caching master images.
"""
import os
import tempfile
import time
from oslo.config import cfg
from oslo_concurrency import lockutils
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import images
from ironic.common import utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
img_cache_opts = [
cfg.BoolOpt('parallel_image_downloads',
default=False,
help='Run image downloads and raw format conversions in '
'parallel.'),
]
CONF = cfg.CONF
CONF.register_opts(img_cache_opts)
# This contains a sorted list of (priority, ImageCache subclass) tuples to be
# considered for cleanup. The list is kept sorted in non-increasing order of
# priority.
_cache_cleanup_list = []
class ImageCache(object):
"""Class handling access to cache for master images."""
def __init__(self, master_dir, cache_size, cache_ttl,
image_service=None):
"""Constructor.
:param master_dir: cache directory to work on
:param cache_size: desired maximum cache size in bytes
:param cache_ttl: cache entity TTL in seconds
:param image_service: Glance image service to use, None for default
"""
self.master_dir = master_dir
self._cache_size = cache_size
self._cache_ttl = cache_ttl
self._image_service = image_service
if master_dir is not None:
fileutils.ensure_tree(master_dir)
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
"""Fetch image by given href to the destination path.
Does nothing if destination path exists.
Only creates a link if master image for this UUID is already in cache.
Otherwise downloads an image and also stores it in cache.
:param href: image UUID or href to fetch
:param dest_path: destination file path
:param ctx: context
:param force_raw: boolean value, whether to convert the image to raw
format
"""
img_download_lock_name = 'download-image'
if self.master_dir is None:
# NOTE(ghe): We don't share images between instances/hosts
if not CONF.parallel_image_downloads:
with lockutils.lock(img_download_lock_name, 'ironic-'):
_fetch(ctx, href, dest_path, self._image_service,
force_raw)
else:
_fetch(ctx, href, dest_path, self._image_service, force_raw)
return
        # TODO(ghe): make hard links and link counts behave the same across all filesystems
master_file_name = service_utils.parse_image_ref(href)[0]
master_path = os.path.join(self.master_dir, master_file_name)
if CONF.parallel_image_downloads:
img_download_lock_name = 'download-image:%s' % master_file_name
# TODO(dtantsur): lock expiration time
with lockutils.lock(img_download_lock_name, 'ironic-'):
if os.path.exists(dest_path):
LOG.debug("Destination %(dest)s already exists for "
"image %(uuid)s" %
{'uuid': href,
'dest': dest_path})
return
try:
# NOTE(dtantsur): ensure we're not in the middle of clean up
with lockutils.lock('master_image', 'ironic-'):
os.link(master_path, dest_path)
except OSError:
LOG.info(_LI("Master cache miss for image %(uuid)s, "
"starting download"),
{'uuid': href})
else:
LOG.debug("Master cache hit for image %(uuid)s",
{'uuid': href})
return
self._download_image(
href, master_path, dest_path, ctx=ctx, force_raw=force_raw)
# NOTE(dtantsur): we increased cache size - time to clean up
self.clean_up()
def _download_image(self, href, master_path, dest_path, ctx=None,
force_raw=True):
"""Download image by href and store at a given path.
This method should be called with uuid-specific lock taken.
:param href: image UUID or href to fetch
:param master_path: destination master path
:param dest_path: destination file path
:param ctx: context
:param force_raw: boolean value, whether to convert the image to raw
format
"""
# TODO(ghe): timeout and retry for downloads
# TODO(ghe): logging when image cannot be created
tmp_dir = tempfile.mkdtemp(dir=self.master_dir)
tmp_path = os.path.join(tmp_dir, href.split('/')[-1])
try:
_fetch(ctx, href, tmp_path, self._image_service, force_raw)
# NOTE(dtantsur): no need for global lock here - master_path
# will have link count >1 at any moment, so won't be cleaned up
os.link(tmp_path, master_path)
os.link(master_path, dest_path)
finally:
utils.rmtree_without_raise(tmp_dir)
@lockutils.synchronized('master_image', 'ironic-')
def clean_up(self, amount=None):
"""Clean up directory with images, keeping cache of the latest images.
Files with link count >1 are never deleted.
Protected by global lock, so that no one messes with master images
after we get listing and before we actually delete files.
        :param amount: if present, amount of space to reclaim in bytes;
                       cleaning stops once this goal is reached,
                       even if more files could be cleaned up
"""
if self.master_dir is None:
return
LOG.debug("Starting clean up for master image cache %(dir)s" %
{'dir': self.master_dir})
amount_copy = amount
listing = _find_candidates_for_deletion(self.master_dir)
survived, amount = self._clean_up_too_old(listing, amount)
if amount is not None and amount <= 0:
return
amount = self._clean_up_ensure_cache_size(survived, amount)
if amount is not None and amount > 0:
LOG.warn(_LW("Cache clean up was unable to reclaim %(required)d "
"MiB of disk space, still %(left)d MiB required"),
{'required': amount_copy / 1024 / 1024,
'left': amount / 1024 / 1024})
def _clean_up_too_old(self, listing, amount):
"""Clean up stage 1: drop images that are older than TTL.
        This method removes all files older than TTL seconds
unless 'amount' is non-None. If 'amount' is non-None,
it starts removing files older than TTL seconds,
oldest first, until the required 'amount' of space is reclaimed.
        :param listing: list of tuples (file name, last used time, stat)
        :param amount: if not None, amount of space to reclaim in bytes;
                       cleaning stops once this goal is reached,
                       even if more files could be cleaned up
:returns: tuple (list of files left after clean up,
amount still to reclaim)
"""
threshold = time.time() - self._cache_ttl
survived = []
for file_name, last_used, stat in listing:
if last_used < threshold:
try:
os.unlink(file_name)
except EnvironmentError as exc:
LOG.warn(_LW("Unable to delete file %(name)s from "
"master image cache: %(exc)s"),
{'name': file_name, 'exc': exc})
else:
if amount is not None:
amount -= stat.st_size
if amount <= 0:
amount = 0
break
else:
survived.append((file_name, last_used, stat))
return survived, amount
def _clean_up_ensure_cache_size(self, listing, amount):
"""Clean up stage 2: try to ensure cache size < threshold.
        Try to delete the oldest files until the condition is satisfied
        or no more files are eligible for deletion.
        :param listing: list of tuples (file name, last used time, stat)
:param amount: amount of space to reclaim, if possible.
if amount is not None, it has higher priority than
cache size in settings
:returns: amount of space still required after clean up
"""
# NOTE(dtantsur): Sort listing to delete the oldest files first
listing = sorted(listing,
key=lambda entry: entry[1],
reverse=True)
total_listing = (os.path.join(self.master_dir, f)
for f in os.listdir(self.master_dir))
total_size = sum(os.path.getsize(f)
for f in total_listing)
while listing and (total_size > self._cache_size or
(amount is not None and amount > 0)):
file_name, last_used, stat = listing.pop()
try:
os.unlink(file_name)
except EnvironmentError as exc:
LOG.warn(_LW("Unable to delete file %(name)s from "
"master image cache: %(exc)s"),
{'name': file_name, 'exc': exc})
else:
total_size -= stat.st_size
if amount is not None:
amount -= stat.st_size
if total_size > self._cache_size:
LOG.info(_LI("After cleaning up cache dir %(dir)s "
"cache size %(actual)d is still larger than "
"threshold %(expected)d"),
{'dir': self.master_dir, 'actual': total_size,
'expected': self._cache_size})
return max(amount, 0)
def _find_candidates_for_deletion(master_dir):
"""Find files eligible for deletion i.e. with link count ==1.
:param master_dir: directory to operate on
:returns: iterator yielding tuples (file name, last used time, stat)
"""
for filename in os.listdir(master_dir):
filename = os.path.join(master_dir, filename)
stat = os.stat(filename)
if not os.path.isfile(filename) or stat.st_nlink > 1:
continue
# NOTE(dtantsur): Detect most recently accessed files,
        # since atime can be disabled by a mount option.
        # Also include ctime, as it changes when the image is linked to.
last_used_time = max(stat.st_mtime, stat.st_atime, stat.st_ctime)
yield filename, last_used_time, stat
def _free_disk_space_for(path):
"""Get free disk space on a drive where path is located."""
stat = os.statvfs(path)
return stat.f_frsize * stat.f_bavail
def _fetch(context, image_href, path, image_service=None, force_raw=False):
"""Fetch image and convert to raw format if needed."""
path_tmp = "%s.part" % path
images.fetch(context, image_href, path_tmp, image_service,
force_raw=False)
# Notes(yjiang5): If glance can provide the virtual size information,
    # then we can first clean the cache and then invoke images.fetch().
if force_raw:
required_space = images.converted_size(path_tmp)
directory = os.path.dirname(path_tmp)
_clean_up_caches(directory, required_space)
images.image_to_raw(image_href, path, path_tmp)
else:
os.rename(path_tmp, path)
def _clean_up_caches(directory, amount):
"""Explicitly cleanup caches based on their priority (if required).
:param directory: the directory (of the cache) to be freed up.
:param amount: amount of space to reclaim.
:raises: InsufficientDiskSpace exception, if we cannot free up enough space
after trying all the caches.
"""
free = _free_disk_space_for(directory)
if amount < free:
return
# NOTE(dtantsur): filter caches, whose directory is on the same device
st_dev = os.stat(directory).st_dev
caches_to_clean = [x[1]() for x in _cache_cleanup_list]
caches = (c for c in caches_to_clean
if os.stat(c.master_dir).st_dev == st_dev)
for cache_to_clean in caches:
cache_to_clean.clean_up(amount=(amount - free))
free = _free_disk_space_for(directory)
if amount < free:
break
else:
raise exception.InsufficientDiskSpace(path=directory,
required=amount / 1024 / 1024,
actual=free / 1024 / 1024,
)
def clean_up_caches(ctx, directory, images_info):
"""Explicitly cleanup caches based on their priority (if required).
This cleans up the caches to free up the amount of space required for the
images in images_info. The caches are cleaned up one after the other in
the order of their priority. If we still cannot free up enough space
after trying all the caches, this method throws exception.
:param ctx: context
:param directory: the directory (of the cache) to be freed up.
:param images_info: a list of tuples of the form (image_uuid,path)
for which space is to be created in cache.
:raises: InsufficientDiskSpace exception, if we cannot free up enough space
after trying all the caches.
"""
total_size = sum(images.download_size(ctx, uuid)
for (uuid, path) in images_info)
_clean_up_caches(directory, total_size)
def cleanup(priority):
"""Decorator method for adding cleanup priority to a class."""
def _add_property_to_class_func(cls):
_cache_cleanup_list.append((priority, cls))
_cache_cleanup_list.sort(reverse=True)
return cls
return _add_property_to_class_func
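# Usage sketch (illustrative only): the `cleanup` decorator registers a cache
# class, together with its priority, in `_cache_cleanup_list`. Note that
# `_clean_up_caches` instantiates each registered class with no arguments, so
# a decorated cache is expected to be constructible that way, e.g.:
#
#     @cleanup(priority=50)
#     class ExampleImageCache(ImageCache):      # hypothetical subclass
#         def __init__(self):
#             super(ExampleImageCache, self).__init__(
#                 master_dir='/tmp/example_cache',
#                 cache_size=10 * 1024 * 1024,
#                 cache_ttl=600)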
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache. This script is a modified version
# of the one created by the Apache Falcon project (https://github.com/apache/falcon/blob/master/falcon_merge_pr.py).
#
# Usage: python carbon_pr.py
#
#
#
import json
import os
import re
import subprocess
import sys
import urllib2
import getpass
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
PROJECT_NAME = "carbondata"
CAPITALIZED_PROJECT_NAME = "carbondata".upper()
# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_USER = os.environ.get("GITHUB_USER", "apache")
GITHUB_BASE = "https://github.com/%s/%s/pull" % (GITHUB_USER, PROJECT_NAME)
GITHUB_API_BASE = "https://api.github.com/repos/%s/%s" % (GITHUB_USER, PROJECT_NAME)
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
TEMP_BRANCH_PREFIX = "PR_TOOL"
# TODO Introduce a convention as this is too brittle
RELEASE_BRANCH_PREFIX = "0."
DEV_BRANCH_NAME = "master"
DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "trunk")
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print "Exceeded the GitHub API rate limit; see the instructions in " + \
"carbon_pr.py to configure an OAuth token for making authenticated " + \
"GitHub requests."
else:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print cmd
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
if original_head != get_current_branch():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(TEMP_BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
def get_current_branch():
return run_cmd("git rev-parse --abbrev-ref HEAD").replace("\n", "")
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (TEMP_BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = raw_input(
"Enter primary author in the format of \"name <email>\" [%s]: " %
distinct_authors[0])
if primary_author == "":
primary_author = distinct_authors[0]
reviewers = raw_input(
"Enter reviewers in the format of \"name1 <email1>, name2 <email2>\": ").strip()
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n")
if len(commits) > 1:
result = raw_input("List pull request commits in squashed commit message? (y/n): ")
if result.lower() == "y":
should_list_commits = True
else:
should_list_commits = False
else:
should_list_commits = False
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of the project.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if (reviewers != ""):
merge_message_flags += ["-m", "Reviewers: %s" % reviewers]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
if should_list_commits:
close_line += " and squashes the following commits:"
merge_message_flags += ["-m", close_line]
if should_list_commits:
merge_message_flags += ["-m", "\n".join(commits)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == DEV_BRANCH_NAME:
versions = filter(lambda x: x == DEFAULT_FIX_VERSION, versions)
if len(versions) > 0:
return versions[0]
else:
return None
else:
versions = filter(lambda x: x.startswith(branch), versions)
if len(versions) > 0:
return versions[-1]
else:
return None
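# Illustrative example (not part of the original script): given an unreleased
# version list sorted newest-to-oldest, the helper above picks
# DEFAULT_FIX_VERSION for the development branch and the oldest matching
# version otherwise. Version names here are hypothetical:
#
#     fix_version_from_branch("master", ["trunk", "0.3.0"])          # -> "trunk"
#     fix_version_from_branch("0.2", ["0.3.0", "0.2.1", "0.2.0"])    # -> "0.2.0"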
def get_jira_credentials():
global JIRA_USERNAME, JIRA_PASSWORD
if not JIRA_USERNAME:
JIRA_USERNAME = raw_input("JIRA_USERNAME environment variable is not set. Please enter your JIRA username:")
if not JIRA_PASSWORD:
JIRA_PASSWORD = getpass.getpass()
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions(CAPITALIZED_PROJECT_NAME)
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
version_names = map(lambda x: x.name, versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, version_names), merge_branches)
    default_fix_versions = filter(lambda x: x is not None, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("%s-[0-9]{4,5}" % CAPITALIZED_PROJECT_NAME, title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the jira reference commit message prefix to "PROJECT_NAME-XXX Issue"
>>> standardize_jira_ref("%s-1563 Old feed instances get deleted from SLA monitoring on feed update" % CAPITALIZED_PROJECT_NAME)
'CARBONDATA-1563 Old feed instances get deleted from SLA monitoring on feed update'
>>> standardize_jira_ref("%s-1032. Test message with dot after id and ellipsis, ..." % CAPITALIZED_PROJECT_NAME)
'CARBONDATA-1032 Test message with dot after id and ellipsis, ...'
>>> standardize_jira_ref("%s-6250 %s-6146 %s-5911: Test multiple commit messages." % (CAPITALIZED_PROJECT_NAME, CAPITALIZED_PROJECT_NAME, CAPITALIZED_PROJECT_NAME))
'CARBONDATA-6250 CARBONDATA-6146 CARBONDATA-5911 Test multiple commit messages.'
>>> standardize_jira_ref("Message without JIRA id")
'Message without JIRA id'
>>> standardize_jira_ref("[CARBONDATA-1009] id in brackets")
'CARBONDATA-1009 id in brackets'
"""
jira_refs = []
components = []
# Extract JIRA ref(s):
pattern = re.compile(r'(%s[-\s]*[0-9]{3,6})+' % CAPITALIZED_PROJECT_NAME, re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append(re.sub(r'\s+', '-', ref.upper()))
text = text.replace(ref, '')
# Extract project name component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
jira_prefix = ' '.join(jira_refs).strip()
if jira_prefix:
jira_prefix = jira_prefix + " "
clean_text = jira_prefix + ' '.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def assert_remotes():
remotes = run_cmd("git remote -v").split()
status = 0
if "apache" not in remotes:
print "You don't have remote 'apache' configured. Please run this command:\n" + \
"git remote add apache https://git-wip-us.apache.org/repos/asf/carbondata.git\n"
status = -1
if "apache-github" not in remotes:
print "You don't have remote 'apache-github' configured. Please run this command:\n" + \
"git remote add apache-github https://github.com/apache/carbondata.git\n"
status = -1
if status != 0:
sys.exit(-1)
def main():
assert_remotes()
global original_head
original_head = get_current_branch()
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith(RELEASE_BRANCH_PREFIX), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = 'branch-0.1' if not branch_names else sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
pr_title = pr["title"]
commit_title = raw_input("Commit title [%s]: " % pr_title.encode("utf-8")).decode("utf-8")
if commit_title == "":
commit_title = pr_title
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(commit_title)
if modified_title != commit_title:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % commit_title
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
commit_title = modified_title
print "Using modified title:"
else:
print "Using original title:"
print commit_title
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("PR title\t%s\nCommit title\t%s\nSource\t\t%s\nTarget\t\t%s\nURL\t\t%s" % (
pr_title, commit_title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, commit_title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
continue_maybe("Would you like to update an associated JIRA?")
get_jira_credentials()
if JIRA_USERNAME and JIRA_PASSWORD:
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(commit_title, merged_refs, jira_comment)
else:
print "JIRA_USERNAME and JIRA_PASSWORD not set"
print "Exiting without trying to close the associated JIRA."
else:
print "Could not find jira-python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
import doctest
failure_count, test_count = doctest.testmod()
if failure_count:
exit(-1)
main()
|
|
"""
State Space Representation, Kalman Filter, Smoother, and Simulation Smoother
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from .kalman_smoother import KalmanSmoother
from .tools import prefix_simulation_smoother_map
SIMULATION_STATE = 0x01
SIMULATION_DISTURBANCE = 0x04
SIMULATION_ALL = (
SIMULATION_STATE | SIMULATION_DISTURBANCE
)
class SimulationSmoother(KalmanSmoother):
r"""
State space representation of a time series process, with Kalman filter
and smoother, and with simulation smoother.
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the measurement equation. Must be less than
or equal to `k_states`. Default is `k_states`.
simulation_smooth_results_class : class, optional
Default results class to use to save output of simulation smoothing.
Default is `SimulationSmoothResults`. If specified, class must extend
from `SimulationSmoothResults`.
simulation_smoother_classes : dict, optional
Dictionary with BLAS prefixes as keys and classes as values.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices, for Kalman filtering options, for Kalman smoothing
options, or for Simulation smoothing options.
See `Representation`, `KalmanFilter`, and `KalmanSmoother` for more
details.
"""
simulation_outputs = [
'simulate_state', 'simulate_disturbance', 'simulate_all'
]
def __init__(self, k_endog, k_states, k_posdef=None,
simulation_smooth_results_class=None,
simulation_smoother_classes=None, **kwargs):
super(SimulationSmoother, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
if simulation_smooth_results_class is None:
simulation_smooth_results_class = SimulationSmoothResults
self.simulation_smooth_results_class = simulation_smooth_results_class
self.prefix_simulation_smoother_map = (
simulation_smoother_classes
if simulation_smoother_classes is not None
else prefix_simulation_smoother_map)
        # Holder for model-level simulation smoother objects, used to
        # simulate new time series.
self._simulators = {}
def get_simulation_output(self, simulation_output=None,
simulate_state=None, simulate_disturbance=None,
simulate_all=None, **kwargs):
r"""
Get simulation output bitmask
Helper method to get final simulation output bitmask from a set of
optional arguments including the bitmask itself and possibly boolean
flags.
Parameters
----------
simulation_output : integer, optional
Simulation output bitmask. If this is specified, it is simply
returned and the other arguments are ignored.
simulate_state : boolean, optional
Whether or not to include the state in the simulation output.
simulate_disturbance : boolean, optional
Whether or not to include the state and observation disturbances
in the simulation output.
simulate_all : boolean, optional
Whether or not to include all simulation output.
\*\*kwargs
Additional keyword arguments. Present so that calls to this method
can use \*\*kwargs without clearing out additional arguments.
"""
        # If simulation_output was not given explicitly, build it from the
        # boolean flags
if simulation_output is None:
simulation_output = 0
if simulate_state == True:
simulation_output |= SIMULATION_STATE
if simulate_disturbance == True:
simulation_output |= SIMULATION_DISTURBANCE
if simulate_all == True:
simulation_output |= SIMULATION_ALL
# Handle case of no information in kwargs
if simulation_output == 0:
# If some arguments were passed, but we still don't have any
# simulation output, raise an exception
argument_set = not all([
simulate_state is None, simulate_disturbance is None,
simulate_all is None
])
if argument_set:
raise ValueError("Invalid simulation output options:"
" given options would result in no"
" output.")
# Otherwise set simulation output to be the same as smoother
# output
simulation_output = self.smoother_output
return simulation_output
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
prefix = self.prefix
# Create the simulator if necessary
if (prefix not in self._simulators or
nsimulations > self._simulators[prefix].nobs):
# Make sure we have the required Statespace representation
prefix, dtype, create_statespace = (
self._initialize_representation())
# Initialize the state
self._initialize_state(prefix=self.prefix)
simulation_output = 0
# Kalman smoother parameters
smoother_output = -1
# Kalman filter parameters
filter_method = self.filter_method
inversion_method = self.inversion_method
stability_method = self.stability_method
conserve_memory = self.conserve_memory
loglikelihood_burn = self.loglikelihood_burn
tolerance = self.tolerance
# Create a new simulation smoother object
cls = self.prefix_simulation_smoother_map[prefix]
self._simulators[prefix] = cls(
self._statespaces[prefix],
filter_method, inversion_method, stability_method,
conserve_memory, tolerance, loglikelihood_burn,
smoother_output, simulation_output, nsimulations, True
)
simulator = self._simulators[prefix]
# Set the disturbance variates
disturbance_variates = np.array(
np.r_[measurement_shocks.ravel(), state_shocks.ravel()],
dtype=self.dtype
).squeeze()
simulator.set_disturbance_variates(disturbance_variates)
        # Set the initial state vector
initial_state_variates = np.array(
initial_state, dtype=self.dtype
).squeeze()
simulator.set_initial_state_variates(initial_state_variates)
# Perform simulation smoothing
        # Note: simulation_output=-1 corresponds to whatever was set up when
        # the simulation smoother was constructed
simulator.simulate(-1)
simulated_obs = np.array(simulator.generated_obs, copy=True)
simulated_state = np.array(simulator.generated_state, copy=True)
return (
simulated_obs[:, :nsimulations],
simulated_state[:, :nsimulations]
)
def simulation_smoother(self, simulation_output=None,
results_class=None, prefix=None, **kwargs):
r"""
Retrieve a simulation smoother for the statespace model.
Parameters
----------
simulation_output : int, optional
Determines which simulation smoother output is calculated.
Default is all (including state and disturbances).
    results_class : class, optional
Default results class to use to save output of simulation
smoothing. Default is `SimulationSmoothResults`. If specified,
class must extend from `SimulationSmoothResults`.
prefix : string
The prefix of the datatype. Usually only used internally.
**kwargs
Additional keyword arguments, used to set the simulation output.
See `set_simulation_output` for more details.
Returns
-------
SimulationSmoothResults
"""
# Set the class to be the default results class, if None provided
if results_class is None:
results_class = self.simulation_smooth_results_class
# Instantiate a new results object
if not issubclass(results_class, SimulationSmoothResults):
raise ValueError('Invalid results class provided.')
# Make sure we have the required Statespace representation
if prefix is None:
prefix = self.prefix
prefix, dtype, create_statespace = (
self._initialize_representation(prefix)
)
# Simulation smoother parameters
simulation_output = self.get_simulation_output(simulation_output,
**kwargs)
# Kalman smoother parameters
smoother_output = kwargs.get('smoother_output', simulation_output)
# Kalman filter parameters
filter_method = kwargs.get('filter_method', self.filter_method)
inversion_method = kwargs.get('inversion_method',
self.inversion_method)
stability_method = kwargs.get('stability_method',
self.stability_method)
conserve_memory = kwargs.get('conserve_memory',
self.conserve_memory)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
tolerance = kwargs.get('tolerance', self.tolerance)
# Create a new simulation smoother object
        cls = self.prefix_simulation_smoother_map[prefix]
simulation_smoother = cls(
self._statespaces[prefix],
filter_method, inversion_method, stability_method, conserve_memory,
tolerance, loglikelihood_burn, smoother_output, simulation_output
)
# Create results object
results = results_class(self, simulation_smoother)
return results
class SimulationSmoothResults(object):
r"""
Results from applying the Kalman smoother and/or filter to a state space
model.
Parameters
----------
model : Representation
A Statespace representation
simulation_smoother : {{prefix}}SimulationSmoother object
The Cython simulation smoother object with which to simulation smooth.
Attributes
----------
model : Representation
A Statespace representation
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
simulation_output : integer
Bitmask controlling simulation output.
simulate_state : boolean
Flag for if the state is included in simulation output.
simulate_disturbance : boolean
Flag for if the state and observation disturbances are included in
simulation output.
simulate_all : boolean
Flag for if simulation output should include everything.
generated_obs : array
Generated observation vector produced as a byproduct of simulation
smoothing.
generated_state : array
Generated state vector produced as a byproduct of simulation smoothing.
simulated_state : array
Simulated state.
simulated_measurement_disturbance : array
Simulated measurement disturbance.
simulated_state_disturbance : array
Simulated state disturbance.
"""
def __init__(self, model, simulation_smoother):
self.model = model
self.prefix = model.prefix
self.dtype = model.dtype
self._simulation_smoother = simulation_smoother
# Output
self._generated_obs = None
self._generated_state = None
self._simulated_state = None
self._simulated_measurement_disturbance = None
self._simulated_state_disturbance = None
@property
def simulation_output(self):
return self._simulation_smoother.simulation_output
@simulation_output.setter
def simulation_output(self, value):
self._simulation_smoother.simulation_output = value
@property
def simulate_state(self):
return bool(self.simulation_output & SIMULATION_STATE)
@simulate_state.setter
def simulate_state(self, value):
if bool(value):
self.simulation_output = self.simulation_output | SIMULATION_STATE
else:
self.simulation_output = self.simulation_output & ~SIMULATION_STATE
@property
def simulate_disturbance(self):
return bool(self.simulation_output & SIMULATION_DISTURBANCE)
@simulate_disturbance.setter
def simulate_disturbance(self, value):
if bool(value):
self.simulation_output = self.simulation_output | SIMULATION_DISTURBANCE
else:
self.simulation_output = self.simulation_output & ~SIMULATION_DISTURBANCE
@property
def simulate_all(self):
return bool(self.simulation_output & SIMULATION_ALL)
@simulate_all.setter
def simulate_all(self, value):
if bool(value):
self.simulation_output = self.simulation_output | SIMULATION_ALL
else:
self.simulation_output = self.simulation_output & ~SIMULATION_ALL
@property
def generated_obs(self):
if self._generated_obs is None:
self._generated_obs = np.array(
self._simulation_smoother.generated_obs, copy=True
)
return self._generated_obs
@property
def generated_state(self):
if self._generated_state is None:
self._generated_state = np.array(
self._simulation_smoother.generated_state, copy=True
)
return self._generated_state
@property
def simulated_state(self):
if self._simulated_state is None:
self._simulated_state = np.array(
self._simulation_smoother.simulated_state, copy=True
)
return self._simulated_state
@property
def simulated_measurement_disturbance(self):
if self._simulated_measurement_disturbance is None:
self._simulated_measurement_disturbance = np.array(
self._simulation_smoother.simulated_measurement_disturbance,
copy=True
)
return self._simulated_measurement_disturbance
@property
def simulated_state_disturbance(self):
if self._simulated_state_disturbance is None:
self._simulated_state_disturbance = np.array(
self._simulation_smoother.simulated_state_disturbance,
copy=True
)
return self._simulated_state_disturbance
def simulate(self, simulation_output=-1, disturbance_variates=None,
initial_state_variates=None):
r"""
Perform simulation smoothing
Does not return anything, but populates the object's `simulated_*`
attributes, as specified by simulation output.
Parameters
----------
simulation_output : integer, optional
Bitmask controlling simulation output. Default is to use the
simulation output defined in object initialization.
disturbance_variates : array_likes, optional
Random values to use as disturbance variates. Usually only
specified if results are to be replicated (e.g. to enforce a seed)
or for testing. If not specified, random variates are drawn.
initial_state_variates : array_likes, optional
Random values to use as initial state variates. Usually only
specified if results are to be replicated (e.g. to enforce a seed)
or for testing. If not specified, random variates are drawn.
"""
# Clear any previous output
self._generated_obs = None
self._generated_state = None
self._simulated_state = None
self._simulated_measurement_disturbance = None
self._simulated_state_disturbance = None
# Re-initialize the _statespace representation
self.model._initialize_representation(prefix=self.prefix)
# Initialize the state
self.model._initialize_state(prefix=self.prefix)
# Draw the (independent) random variates for disturbances in the
# simulation
if disturbance_variates is not None:
self._simulation_smoother.set_disturbance_variates(
np.array(disturbance_variates, dtype=self.dtype)
)
else:
self._simulation_smoother.draw_disturbance_variates()
# Draw the (independent) random variates for the initial states in the
# simulation
if initial_state_variates is not None:
self._simulation_smoother.set_initial_state_variates(
np.array(initial_state_variates, dtype=self.dtype)
)
else:
self._simulation_smoother.draw_initial_state_variates()
# Perform simulation smoothing
        # Note: simulation_output=-1 corresponds to whatever was set up when
        # the simulation smoother was constructed
self._simulation_smoother.simulate(simulation_output)
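# Illustrative sketch (hypothetical helper, not part of the original module):
# the simulation output options above are plain bit flags, so combining them
# is a bitwise OR and testing for them is a bitwise AND, exactly as
# `get_simulation_output` and the `SimulationSmoothResults` property setters do.
def _combine_simulation_flags(simulate_state, simulate_disturbance):
    """Combine boolean flags into a simulation output bitmask.
    >>> _combine_simulation_flags(True, False) == SIMULATION_STATE
    True
    >>> _combine_simulation_flags(True, True) == SIMULATION_ALL
    True
    >>> bool(_combine_simulation_flags(False, True) & SIMULATION_STATE)
    False
    """
    output = 0
    if simulate_state:
        output |= SIMULATION_STATE
    if simulate_disturbance:
        output |= SIMULATION_DISTURBANCE
    return output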
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import re
import six
import yaml
from st2common.exceptions.workflow import WorkflowDefinitionException
from st2common import log as logging
from st2common.models.system.common import ResourceReference
from st2common.models.utils import action_param_utils
from st2common.util import action_db as action_utils
LOG = logging.getLogger(__name__)
CMD_PTRN = re.compile(r'^[\w\.]+[^=\s"]*')
INLINE_YAQL = '<%.*?%>'
_ALL_IN_BRACKETS = r"\[.*\]\s*"
_ALL_IN_QUOTES = r'"[^"]*"\s*'
_ALL_IN_APOSTROPHES = r"'[^']*'\s*"
_DIGITS = r"\d+"
_TRUE = "true"
_FALSE = "false"
_NULL = "null"
ALL = (
_ALL_IN_QUOTES, _ALL_IN_APOSTROPHES, INLINE_YAQL,
_ALL_IN_BRACKETS, _TRUE, _FALSE, _NULL, _DIGITS
)
PARAMS_PTRN = re.compile(r"([\w]+)=(%s)" % "|".join(ALL))
SPEC_TYPES = {
'adhoc': {
'action_key': 'base',
'input_key': 'base-input'
},
'task': {
'action_key': 'action',
'input_key': 'input'
}
}
def _parse_cmd_and_input(cmd_str):
cmd_matcher = CMD_PTRN.search(cmd_str)
if not cmd_matcher:
raise ValueError("Invalid action/workflow task property: %s" % cmd_str)
cmd = cmd_matcher.group()
params = {}
for k, v in re.findall(PARAMS_PTRN, cmd_str):
# Remove embracing quotes.
v = v.strip()
if v[0] == '"' or v[0] == "'":
v = v[1:-1]
else:
try:
v = json.loads(v)
except Exception:
pass
params[k] = v
return cmd, params
def _merge_dicts(left, right):
if left is None:
return right
if right is None:
return left
    for k, v in six.iteritems(right):
if k not in left:
left[k] = v
else:
left_v = left[k]
if isinstance(left_v, dict) and isinstance(v, dict):
_merge_dicts(left_v, v)
return left
def _eval_inline_params(spec, action_key, input_key):
action_str = spec.get(action_key)
command, inputs = _parse_cmd_and_input(action_str)
if inputs:
spec[action_key] = command
if input_key not in spec:
spec[input_key] = {}
_merge_dicts(spec[input_key], inputs)
def _validate_action_parameters(name, action, action_params):
requires, unexpected = action_param_utils.validate_action_parameters(action.ref, action_params)
if requires:
raise WorkflowDefinitionException('Missing required parameters in "%s" for action "%s": '
'"%s"' % (name, action.ref, '", "'.join(requires)))
if unexpected:
raise WorkflowDefinitionException('Unexpected parameters in "%s" for action "%s": '
'"%s"' % (name, action.ref, '", "'.join(unexpected)))
def _transform_action(name, spec):
action_key, input_key = None, None
for spec_type, spec_meta in six.iteritems(SPEC_TYPES):
if spec_meta['action_key'] in spec:
action_key = spec_meta['action_key']
input_key = spec_meta['input_key']
break
if not action_key:
return
if spec[action_key] == 'st2.callback':
raise WorkflowDefinitionException('st2.callback is deprecated.')
# Convert parameters that are inline (i.e. action: some_action var1={$.value1} var2={$.value2})
# and split it to action name and input dict as illustrated below.
#
# action: some_action
# input:
# var1: <% $.value1 %>
# var2: <% $.value2 %>
#
# This step to separate the action name and the input parameters is required
# to wrap them with the st2.action proxy.
#
# action: st2.action
# input:
# ref: some_action
# parameters:
# var1: <% $.value1 %>
# var2: <% $.value2 %>
_eval_inline_params(spec, action_key, input_key)
transformed = (spec[action_key] == 'st2.action')
action_ref = spec[input_key]['ref'] if transformed else spec[action_key]
action = None
# Identify if action is a registered StackStorm action.
if action_ref and ResourceReference.is_resource_reference(action_ref):
action = action_utils.get_action_by_ref(ref=action_ref)
# If action is a registered StackStorm action, then wrap the
# action with the st2 proxy and validate the action input.
if action:
if not transformed:
spec[action_key] = 'st2.action'
action_input = spec.get(input_key)
spec[input_key] = {'ref': action_ref}
if action_input:
spec[input_key]['parameters'] = action_input
action_input = spec.get(input_key, {})
action_params = action_input.get('parameters', {})
_validate_action_parameters(name, action, action_params)
def transform_definition(definition):
# If definition is a dictionary, there is no need to load from YAML.
is_dict = isinstance(definition, dict)
spec = copy.deepcopy(definition) if is_dict else yaml.safe_load(definition)
# Transform adhoc actions
for action_name, action_spec in six.iteritems(spec.get('actions', {})):
_transform_action(action_name, action_spec)
# Determine if definition is a workbook or workflow
is_workbook = 'workflows' in spec
# Transform tasks
if is_workbook:
for workflow_name, workflow_spec in six.iteritems(spec.get('workflows', {})):
if 'tasks' in workflow_spec:
for task_name, task_spec in six.iteritems(workflow_spec.get('tasks')):
_transform_action(task_name, task_spec)
else:
for key, value in six.iteritems(spec):
if 'tasks' in value:
for task_name, task_spec in six.iteritems(value.get('tasks')):
_transform_action(task_name, task_spec)
# Return the same type as original input.
return spec if is_dict else yaml.safe_dump(spec, default_flow_style=False)
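# Illustrative sketch (hypothetical helper, not used elsewhere in this module):
# a doctest-style demonstration of how `_parse_cmd_and_input` splits an inline
# task property into the action reference and its input parameters.
def _example_parse_cmd_and_input():
    """Show the command/parameter split performed by _parse_cmd_and_input.
    >>> cmd, params = _parse_cmd_and_input(
    ...     'core.local cmd="echo hello" timeout=60')
    >>> cmd
    'core.local'
    >>> params == {'cmd': 'echo hello', 'timeout': 60}
    True
    """
    return _parse_cmd_and_input('core.local cmd="echo hello" timeout=60')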
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 05/04/17 at 2:48 PM
@author: neil
Program description here
Version 0.0.0
"""
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from tqdm import tqdm
import os
try:
import periodogram_functions2 as pf2
except ImportError:
    raise Exception("Program requires 'periodogram_functions2.py'")
# =============================================================================
# Define variables
# =============================================================================
# Deal with choosing a target and data paths
WORKSPACE = "/Astro/Projects/RayPaul_Work/SuperWASP/"
# location of folder to plot files to
PLOTPATH = WORKSPACE + '/Plots/ls_analysis_run_3/'
# file to save periods to
PERIODPATH = WORKSPACE + '/Data/ls_analysis_run_3/'
PERIODPATH += 'light_curve_analysis_periods_regions.fits'
# if True and periodpath file exists we will skip entries that exist
SKIP_DONE = True
# Write to file (if false does not save to file)
WRITE_TO_FILE = True
# Reset - resets all files (i.e. deletes fits and graphs)
RESET = False
# -----------------------------------------------------------------------------
# set database settings
FROM_DATABASE = True
HOSTNAME = 'localhost'
USERNAME = 'root'
PASSWORD = '1234'
DATABASE = 'swasp'
TABLE = 'swasp_sep16_tab'
# program to run
COMMAND = 'python light_curve_analysis.py '
# -----------------------------------------------------------------------------
# whether to show the graph
SHOW = False
# size in inches of the plot
FIGSIZE = (20, 16)
# decide whether to plot nan periods (saves time)
PLOT_NAN_PERIOD = True
# whether to log progress to standard output (print)
LOG = True
# -----------------------------------------------------------------------------
# minimum time period to be sensitive to (5 hours)
TMIN = 5/24.0
# maximum time period to be sensitive to (100 days)
TMAX = 100
# number of samples per peak
SPP = 5
# -----------------------------------------------------------------------------
# random seed for bootstrapping
RANDOM_SEED = 9999
# number of bootstraps to perform
N_BS = 100
# Phase offset
OFFSET = (-0.5, 0.5)
# define the FAP percentiles
PERCENTILES = [pf2.sigma2percentile(1)*100,
pf2.sigma2percentile(2)*100,
pf2.sigma2percentile(3)*100]
# -----------------------------------------------------------------------------
# number of peaks to find
NPEAKS = 5
# number of pixels around a peak to class as same peak
BOXSIZE = 5
# percentage around noise peak to rule out true peak
THRESHOLD = 5.0
# percentile (FAP) to cut peaks at (i.e. any below are not used)
CUTPERCENTILE = pf2.sigma2percentile(1.0)*100
# -----------------------------------------------------------------------------
# minimum number of data points to define a sub region
MINPOINTS = 50 # points
# maximum gap between data points to define a sub region
MAXGAP = 20 # days
# -----------------------------------------------------------------------------
# Data cleaning
UNCERTAINTY_CLIP = 0.005
SIGMA_CLIP = 3.0
# =============================================================================
# Define functions
# =============================================================================
def reset(do_reset):
if do_reset:
uinput = input("Are you sure you wish to delete all current files?"
"[Y]es or [N]o?\t")
if "Y" in uinput.upper():
do_reset = True
else:
do_reset = False
if do_reset and os.path.exists(PERIODPATH):
print('\n Removing {0}'.format(PERIODPATH))
os.remove(PERIODPATH)
if do_reset and os.path.exists(PLOTPATH):
files = os.listdir(PLOTPATH)
for filename in files:
loc = PLOTPATH + '/' + filename
print('\n Removing {0}'.format(loc))
os.remove(loc)
def load_pre_existing_data(sids, dpath):
"""
    Load pre-existing data and determine which object IDs can be skipped
    :param sids: list of strings, the object IDs to check against the file
    :param dpath: string, filepath to the pre-existing data
    :return: do_ids, list of strings, the object IDs still to be processed
"""
if os.path.exists(dpath) and SKIP_DONE and WRITE_TO_FILE:
print("\n Loading pre existing files...")
atable = Table.read(dpath)
done_ids = list(atable['name'])
del atable
# ---------------------------------------------------------------------
# skip sids if the are in table (assume this means they are done)
do_ids, done_sids = [], []
for done_id in done_ids:
raw_id = done_id.split('_Full')[0].split('_R')[0]
done_sids.append(raw_id)
done_sids = np.unique(done_sids)
skips = 0
for sid in sids:
if sid in done_sids:
skips += 1
else:
do_ids.append(sid)
        # Report how many files were skipped due to pre-existing data
print('\n Skipping {0} sids'.format(skips))
else:
print('\n Nothing skipped.')
do_ids = list(sids)
return do_ids
def sort_data_in_dict(s_id, sarr, earr, start, end):
"""
Sort the data based on the id and the start and end points (into the sarr
and earr dictionaries
:param s_id: string, ID of this object (will be used as key)
    :param sarr: dict, start point lists keyed by ID (one list per object;
                 e.g. if there are two segments the list would be
                 [x pos start of segment 1, x pos start of segment 2])
    :param earr: dict, end point lists keyed by ID (one list per object;
                 e.g. if there are two segments the list would be
                 [x pos end of segment 1, x pos end of segment 2])
:param start: float, the x starting position of this segment
:param end: float, the x ending position of this segment
:return:
"""
if s_id not in sarr:
sarr[s_id], earr[s_id] = [start], [end]
else:
sarr[s_id].append(start), earr[s_id].append(end)
return sarr, earr
def get_arguments_from_constants():
argstring = ''
for arg in list(globals().keys()):
if arg.isupper():
argstring += '{0}="{1}" '.format(arg, globals()[arg])
return argstring
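# Illustrative sketch (hypothetical helper, not used by this script): the same
# constant-to-argument pattern as get_arguments_from_constants(), but driven by
# an explicit dict instead of globals(), which makes the behaviour easy to test.
def _arguments_from_dict(constants):
    """Build a KEY="value" argument string from a dict of constants.
    >>> _arguments_from_dict({'TMAX': 100, 'LOG': True})
    'LOG="True" TMAX="100" '
    """
    argstring = ''
    for key in sorted(constants):
        if key.isupper():
            argstring += '{0}="{1}" '.format(key, constants[key])
    return argstring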
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# -------------------------------------------------------------------------
# deal with reset
reset(RESET)
    # get list of unique ids (for selecting each as a separate curve)
sql_kwargs = dict(host=HOSTNAME, db=DATABASE, table=TABLE,
user=USERNAME, passwd=PASSWORD, conn_timeout=100000)
sids, conn = pf2.get_list_of_objects_from_db(conn=None, **sql_kwargs)
# -------------------------------------------------------------------------
# load file if it exists (to save on repeating on exit)
do_sids = load_pre_existing_data(sids, PERIODPATH)
# -------------------------------------------------------------------------
# construct python command and arguments
argumentstring = get_arguments_from_constants()
# -------------------------------------------------------------------------
# loop around SIDs
for s_it, sid in enumerate(do_sids):
# print progress
print('\n{0}\n\t {1} of {2} \n{0}'.format('+'*50, s_it+1, len(do_sids)))
        # add SID to the argument string for this object only (do not mutate
        # the shared base string, otherwise SIDs accumulate across iterations)
        sid_argumentstring = argumentstring + ' SID="{0}"'.format(sid)
        # run python program for file
        os.system(COMMAND + sid_argumentstring)
# input('Enter to continue. Control+C to cancel')
# =============================================================================
# End of code
# =============================================================================
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
import jsonpickle
from cairis.core.Attacker import Attacker
from cairis.tools.JsonConverter import json_deserialize
from cairis.core.AttackerEnvironmentProperties import AttackerEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Robin Quetin, Shamal Faily'
class AttackerAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_attacker_name = 'Mallory'
self.existing_environment_name_1 = 'Stroke'
self.existing_environment_name_2 = 'Psychosis'
self.existing_motive_names = ['Hactivism', 'Money']
self.existing_role_names = ['Hacker', 'Developer']
self.existing_capabilities = [
{
'name':'Resources/Equipment',
'value': 'Low'
},
{
'name': 'Knowledge/Methods',
'value': 'High'
}
]
attacker_class = Attacker.__module__+'.'+Attacker.__name__
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/attackers?session_id=test')
if (sys.version_info > (3,)):
attackers = jsonpickle.decode(rv.data.decode('utf-8'))
else:
attackers = jsonpickle.decode(rv.data)
self.assertIsNotNone(attackers, 'No results after deserialization')
self.assertIsInstance(attackers, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(attackers), 0, 'No attackers in the dictionary')
self.logger.info('[%s] Attackers found: %d', method, len(attackers))
attacker = list(attackers.values())[0]
self.logger.info('[%s] First attacker: %s\n', method, attacker['theName'])
def test_get_all_summary(self):
method = 'test_get_all_summary'
rv = self.app.get('/api/attackers/summary?session_id=test')
if (sys.version_info > (3,)):
ats = json_deserialize(rv.data.decode('utf-8'))
else:
ats = json_deserialize(rv.data)
self.assertIsNotNone(ats, 'No results after deserialization')
self.assertGreater(len(ats), 0, 'No attacker summaries')
self.assertIsInstance(ats[0], dict)
self.logger.info('[%s] Attackers found: %d', method, len(ats))
    self.logger.info('[%s] First attacker summary: %s\n', method, ats[0]['theName'])
def test_get_by_name(self):
method = 'test_get_by_name'
url = '/api/attackers/name/%s?session_id=test' % quote(self.existing_attacker_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
if (sys.version_info > (3,)):
attacker = jsonpickle.decode(rv.data.decode('utf-8'))
else:
attacker = jsonpickle.decode(rv.data)
self.assertIsNotNone(attacker, 'No results after deserialization')
self.logger.info('[%s] Attacker: %s\n', method, attacker['theName'])
def test_delete(self):
method = 'test_delete'
url = '/api/attackers/name/%s?session_id=test' % quote(self.prepare_new_attacker().theName)
new_attacker_body = self.prepare_json()
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_attacker_body)
self.app.post('/api/attackers', content_type='application/json', data=new_attacker_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.info('[%s] Response data: %s', method, responseData)
self.assertIsNotNone(responseData, 'No response')
json_resp = jsonpickle.decode(responseData)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
def test_post(self):
method = 'test_post'
url = '/api/attackers'
self.logger.info('[%s] URL: %s', method, url)
new_attacker_body = self.prepare_json()
self.app.delete('/api/attackers/name/%s?session_id=test' % quote(self.prepare_new_attacker().theName))
rv = self.app.post(url, content_type='application/json', data=new_attacker_body)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
rv = self.app.delete('/api/attackers/name/%s?session_id=test' % quote(self.prepare_new_attacker().theName))
def test_put(self):
method = 'test_put'
url = '/api/attackers'
self.logger.info('[%s] URL: %s', method, url)
new_attacker_body = self.prepare_json()
rv = self.app.delete('/api/attackers/name/%s?session_id=test' % quote(self.prepare_new_attacker().theName))
rv = self.app.post(url, content_type='application/json', data=new_attacker_body)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
attacker_to_update = self.prepare_new_attacker()
attacker_to_update.theName = 'Edited test attacker'
attacker_to_update.theId = -1
upd_env_body = self.prepare_json(attacker=attacker_to_update)
rv = self.app.put('/api/attackers/name/%s?session_id=test' % quote(self.prepare_new_attacker().theName), data=upd_env_body, content_type='application/json')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.assertIsNotNone(responseData, 'No response')
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('updated'), -1, 'The attacker was not successfully updated')
rv = self.app.get('/api/attackers/name/%s?session_id=test' % quote(attacker_to_update.theName))
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
upd_attacker = jsonpickle.decode(responseData)
self.assertIsNotNone(upd_attacker, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, responseData)
self.logger.info('[%s] Attacker: %s\n', method, upd_attacker['theName'])
rv = self.app.delete('/api/attackers/name/%s?session_id=test' % quote(attacker_to_update.theName))
def prepare_new_attacker(self):
new_attacker_props = [
AttackerEnvironmentProperties(
environmentName=self.existing_environment_name_1,
roles=self.existing_role_names,
motives=self.existing_motive_names,
capabilities=self.existing_capabilities
),
AttackerEnvironmentProperties(
environmentName=self.existing_environment_name_2,
roles=self.existing_role_names,
motives=self.existing_motive_names,
capabilities=self.existing_capabilities
)
]
new_attacker = Attacker(
attackerId=-1,
attackerName='Test attacker',
attackerDescription='This is a test attacker',
attackerImage='',
tags=['test', 'test123'],
environmentProperties=[]
)
new_attacker.theEnvironmentProperties = new_attacker_props
new_attacker.theEnvironmentDictionary = {}
new_attacker.theAttackerPropertyDictionary = {}
delattr(new_attacker, 'theEnvironmentDictionary')
delattr(new_attacker, 'theAttackerPropertyDictionary')
return new_attacker
def prepare_dict(self, attacker=None):
if attacker is None:
attacker = self.prepare_new_attacker()
else:
assert isinstance(attacker, Attacker)
return {
'session_id': 'test',
'object': attacker,
}
def prepare_json(self, data_dict=None, attacker=None):
if data_dict is None:
data_dict = self.prepare_dict(attacker=attacker)
else:
assert isinstance(data_dict, dict)
new_attacker_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_attacker_body)
return new_attacker_body
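# Illustrative sketch (hypothetical helper, not used by the tests above): the
# request bodies are built with jsonpickle.encode(..., unpicklable=False), which
# drops the py/object type tags so the server receives plain JSON, and responses
# are turned back into dicts with jsonpickle.decode().
def _example_jsonpickle_roundtrip():
  """Round-trip a plain object through jsonpickle without type tags.
  >>> class Payload(object):
  ...   def __init__(self):
  ...     self.theName = 'Test attacker'
  >>> body = jsonpickle.encode({'session_id': 'test', 'object': Payload()},
  ...                          unpicklable=False)
  >>> jsonpickle.decode(body)['object']['theName'] == 'Test attacker'
  True
  """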
|
|
import json
import pytest
from django.urls import reverse
from ..models import Integration
users = (
('owner', 'owner'),
('manager', 'manager'),
('author', 'author'),
('guest', 'guest'),
('api', 'api'),
('user', 'user'),
('site', 'site'),
('anonymous', None),
)
view_integration_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'author': [1, 3, 5],
'guest': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
add_integration_permission_map = change_integration_permission_map = delete_integration_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
urlnames = {
'list': 'v1-projects:project-integration-list',
'detail': 'v1-projects:project-integration-detail'
}
projects = [1, 2, 3, 4, 5]
integrations = [1, 2]
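# Illustrative sketch (hypothetical helper, not used by the tests below): the
# write tests all branch the same way on the permission maps above, so the
# expected status code can be derived from the user, the project and the map.
def _expected_write_status(username, project_id, action_map, success_code,
                           exists=True):
    if exists and project_id in action_map.get(username, []):
        return success_code  # e.g. 201 for POST, 200 for PUT, 204 for DELETE
    if exists and project_id in view_integration_permission_map.get(username, []):
        return 403
    return 404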
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_list(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
response = client.get(url)
if project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 200
if username == 'user':
assert sorted([item['id'] for item in response.json()]) == []
else:
values_list = Integration.objects.filter(project_id=project_id) \
.order_by('id').values_list('id', flat=True)
assert sorted([item['id'] for item in response.json()]) == list(values_list)
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('integration_id', integrations)
def test_detail(db, client, username, password, project_id, integration_id):
client.login(username=username, password=password)
integration = Integration.objects.filter(project_id=project_id, id=integration_id).first()
url = reverse(urlnames['detail'], args=[project_id, integration_id])
response = client.get(url)
if integration and project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 200
assert response.json().get('id') == integration_id
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'provider_key': 'github',
'options': [
{
'key': 'repo',
'value': 'example/example'
}
]
}
response = client.post(url, data=json.dumps(data), content_type="application/json")
if project_id in add_integration_permission_map.get(username, []):
assert response.status_code == 201, response.content
elif project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create_error1(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'provider_key': 'wrong',
'options': [
{
'key': 'repo',
'value': 'example/example'
}
]
}
response = client.post(url, data=json.dumps(data), content_type="application/json")
if project_id in add_integration_permission_map.get(username, []):
assert response.status_code == 400, response.json()
assert response.json()['provider_key'], response.json()
elif project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create_error2(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'provider_key': 'github',
'options': [
{
'key': 'repo',
'value': ''
}
]
}
response = client.post(url, data=json.dumps(data), content_type="application/json")
if project_id in add_integration_permission_map.get(username, []):
assert response.status_code == 400, response.json()
assert response.json()['options'][0]['value'], response.json()
elif project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create_error3(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'provider_key': 'github',
'options': [
{
'key': 'repo',
'value': 'example/example'
},
{
'key': 'foo',
'value': 'bar'
}
]
}
response = client.post(url, data=json.dumps(data), content_type="application/json")
if project_id in add_integration_permission_map.get(username, []):
assert response.status_code == 400, response.json()
assert 'foo' in response.json()['options'][0], response.json()
elif project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('integration_id', integrations)
def test_update(db, client, username, password, project_id, integration_id):
client.login(username=username, password=password)
integration = Integration.objects.filter(project_id=project_id, id=integration_id).first()
url = reverse(urlnames['detail'], args=[project_id, integration_id])
data = {
'provider_key': 'github',
'options': [
{
'key': 'repo',
'value': 'example/test'
}
]
}
response = client.put(url, data, content_type='application/json')
if integration and project_id in change_integration_permission_map.get(username, []):
assert response.status_code == 200
assert sorted(response.json().get('options'), key=lambda obj: obj['key']) == [
{
'key': 'repo',
'value': 'example/test'
},
{
'key': 'secret',
'value': ''
}
]
elif integration and project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('integration_id', integrations)
def test_delete(db, client, username, password, project_id, integration_id):
client.login(username=username, password=password)
integration = Integration.objects.filter(project_id=project_id, id=integration_id).first()
url = reverse(urlnames['detail'], args=[project_id, integration_id])
response = client.delete(url)
if integration and project_id in delete_integration_permission_map.get(username, []):
assert response.status_code == 204
elif integration and project_id in view_integration_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
|
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from nova.openstack.common._i18n import _
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: b, d
:param fn: the function to check
    :param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, '__self__', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
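# Illustrative sketch (hypothetical helper, not used elsewhere): stacked @arg
# decorators are applied bottom-up, so add_arg() uses insert(0, ...) to keep
# the options in the order they appear in the source, as the NOTE above says.
def _example_argument_order():
    """Demonstrate that @arg preserves top-to-bottom option order.
    >>> @arg("name", help="Name of the new entity")
    ... @arg("--size", help="Optional size")
    ... def do_create(args):
    ...     pass
    >>> [a[0] for a in do_create.arguments]
    [('name',), ('--size',)]
    """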
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s") %
                         {'labels': field_labels, 'fields': fields})
if sortby_index is None:
kwargs = {}
else:
kwargs = {'sortby': field_labels[sortby_index]}
pt = prettytable.PrettyTable(field_labels)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
if six.PY3:
print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
else:
print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
:param dict_value: header label for the value (second) column
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items()):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
if six.PY3:
print(encodeutils.safe_encode(pt.get_string()).decode())
else:
print(encodeutils.safe_encode(pt.get_string()))
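# Illustrative sketch (hypothetical helper, not used elsewhere): both printers
# above reduce to the same prettytable pattern of building a left-aligned table
# row by row and rendering it with get_string().
def _example_table():
    pt = prettytable.PrettyTable(['Property', 'Value'])
    pt.align = 'l'
    pt.add_row(['name', 'test-server'])
    pt.add_row(['status', 'ACTIVE'])
    return pt.get_string(sortby='Property')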
def get_password(max_password_prompts=3):
"""Read password from TTY."""
verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
pw = None
if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
# Check for Ctrl-D
try:
for __ in moves.range(max_password_prompts):
pw1 = getpass.getpass("OS Password: ")
if verify:
pw2 = getpass.getpass("Please verify: ")
else:
pw2 = pw1
if pw1 == pw2 and pw1:
pw = pw1
break
except EOFError:
pass
return pw
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def exit(msg=''):
if msg:
        print(msg, file=sys.stderr)
sys.exit(1)
|
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import xml.dom.minidom as xml
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
self.plugin = job_utils.get_plugin(self.cluster)
def get_remote_client(self):
return o.RemoteOozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster),
self.get_hdfs_user())
def get_client(self):
# by default engine will return standard oozie client implementation
return o.OozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster))
def _get_oozie_job_params(self, hdfs_user, path_to_workflow,
oozie_params, use_hbase_lib,
scheduled_params=None, job_dir=None,
job_execution_type=None):
oozie_libpath_key = "oozie.libpath"
oozie_libpath = ""
rm_path = self.get_resource_manager_uri(self.cluster)
nn_path = self.get_name_node_uri(self.cluster)
hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH)
if use_hbase_lib:
if oozie_libpath_key in oozie_params:
oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key,
""), hbase_common_lib_path)
else:
oozie_libpath = hbase_common_lib_path
if job_execution_type == "scheduled":
app_path = "oozie.coord.application.path"
job_parameters = {
"start": scheduled_params.get('start'),
"end": scheduled_params.get('end'),
"frequency": scheduled_params.get('frequency'),
"workflowAppUri": "%s%s" % (nn_path, job_dir),
app_path: "%s%s" % (nn_path, job_dir)}
else:
app_path = "oozie.wf.application.path"
job_parameters = {
app_path: "%s%s" % (nn_path, path_to_workflow)}
job_parameters["nameNode"] = nn_path
job_parameters["user.name"] = hdfs_user
job_parameters["jobTracker"] = rm_path
job_parameters[oozie_libpath_key] = oozie_libpath
job_parameters["oozie.use.system.libpath"] = "true"
# Don't let the application path be overwritten, that can't
# possibly make any sense
if app_path in oozie_params:
del oozie_params[app_path]
if oozie_libpath_key in oozie_params:
del oozie_params[oozie_libpath_key]
job_parameters.update(oozie_params)
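        # Illustrative example (hypothetical values): for a plain workflow run
        # the dictionary returned below looks roughly like
        #     {'nameNode': 'hdfs://nn:8020',
        #      'jobTracker': 'rm:8032',
        #      'user.name': 'hadoop',
        #      'oozie.libpath': '',
        #      'oozie.use.system.libpath': 'true',
        #      'oozie.wf.application.path': 'hdfs://nn:8020<path_to_workflow>'}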
return job_parameters
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
return "%s/workflow.xml" % job_dir
def _upload_coordinator_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "coordinator.xml", job_dir,
hdfs_user)
return "%s/coordinator.xml" % job_dir
def cancel_job(self, job_execution):
if job_execution.engine_job_id is not None:
client = self.get_client()
client.kill_job(job_execution)
return client.get_job_info(job_execution)
def get_job_status(self, job_execution):
if job_execution.engine_job_id is not None:
return self.get_client().get_job_info(job_execution)
def _prepare_run_job(self, job_execution):
ctx = context.ctx()
# This will be a dictionary of tuples, (native_url, runtime_url)
# keyed by data_source id
data_source_urls = {}
prepared_job_params = {}
job = conductor.job_get(ctx, job_execution.job_id)
input_source, output_source = job_utils.get_data_sources(
job_execution, job, data_source_urls, self.cluster)
# Updated_job_configs will be a copy of job_execution.job_configs with
# any name or uuid references to data_sources resolved to paths
# assuming substitution is enabled.
# If substitution is not enabled then updated_job_configs will
# just be a reference to job_execution.job_configs to avoid a copy.
# Additional_sources will be a list of any data_sources found.
additional_sources, updated_job_configs = (
job_utils.resolve_data_source_references(job_execution.job_configs,
job_execution.id,
data_source_urls,
self.cluster)
)
job_execution = conductor.job_execution_update(
ctx, job_execution,
{"data_source_urls": job_utils.to_url_dict(data_source_urls)})
# Now that we've recorded the native urls, we can switch to the
# runtime urls
data_source_urls = job_utils.to_url_dict(data_source_urls,
runtime=True)
proxy_configs = updated_job_configs.get('proxy_configs')
configs = updated_job_configs.get('configs', {})
use_hbase_lib = configs.get('edp.hbase_common_lib', {})
# Extract all the 'oozie.' configs so that they can be set in the
# job properties file. These are config values for Oozie itself,
# not the job code
oozie_params = {}
for k in list(configs):
if k.startswith('oozie.'):
oozie_params[k] = configs[k]
for data_source in [input_source, output_source] + additional_sources:
if data_source and data_source.type == 'hdfs':
h.configure_cluster_for_hdfs(
self.cluster, data_source_urls[data_source.id])
break
external_hdfs_urls = self._resolve_external_hdfs_urls(
job_execution.job_configs)
for url in external_hdfs_urls:
h.configure_cluster_for_hdfs(self.cluster, url)
hdfs_user = self.get_hdfs_user()
# TODO(tmckay): this should probably be "get_namenode"
# but that call does not exist in the oozie engine api now.
oozie_server = self.get_oozie_server(self.cluster)
wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
proxy_configs)
wf_xml = workflow_factory.get_workflow_xml(
job, self.cluster, updated_job_configs,
input_source, output_source,
hdfs_user, data_source_urls)
path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
wf_xml, hdfs_user)
prepared_job_params['context'] = ctx
prepared_job_params['hdfs_user'] = hdfs_user
prepared_job_params['path_to_workflow'] = path_to_workflow
prepared_job_params['use_hbase_lib'] = use_hbase_lib
prepared_job_params['job_execution'] = job_execution
prepared_job_params['oozie_params'] = oozie_params
prepared_job_params['wf_dir'] = wf_dir
prepared_job_params['oozie_server'] = oozie_server
return prepared_job_params
def run_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
path_to_workflow = prepared_job_params['path_to_workflow']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
job_params = self._get_oozie_job_params(hdfs_user,
path_to_workflow,
oozie_params,
use_hbase_lib)
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
conductor.job_execution_update(
context.ctx(), job_execution.id,
{'info': {'status': edp.JOB_STATUS_READYTORUN},
'engine_job_id': oozie_job_id})
client.run_job(job_execution, oozie_job_id)
try:
status = client.get_job_info(job_execution, oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
def run_scheduled_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
oozie_server = prepared_job_params['oozie_server']
wf_dir = prepared_job_params['wf_dir']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
coord_configs = {"jobTracker": "${jobTracker}",
"nameNode": "${nameNode}"}
coord_xml = self._create_coordinator_xml(coord_configs)
self._upload_coordinator_file(oozie_server, wf_dir, coord_xml,
hdfs_user)
job_params = self._get_oozie_job_params(
hdfs_user, None, oozie_params, use_hbase_lib,
job_execution.job_configs.job_execution_info, wf_dir,
"scheduled")
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
try:
status = client.get_job_status(job_execution,
oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
@abc.abstractmethod
def get_hdfs_user(self):
pass
@abc.abstractmethod
def create_hdfs_dir(self, remote, dir_name):
pass
@abc.abstractmethod
def get_oozie_server_uri(self, cluster):
pass
@abc.abstractmethod
def get_oozie_server(self, cluster):
pass
@abc.abstractmethod
def get_name_node_uri(self, cluster):
pass
@abc.abstractmethod
def get_resource_manager_uri(self, cluster):
pass
def validate_job_execution(self, cluster, job, data):
# Shell job type requires no specific fields
if job.type == edp.JOB_TYPE_SHELL:
return
# All other types except Java require input and output
# objects and Java require main class
if job.type == edp.JOB_TYPE_JAVA:
j.check_main_class_present(data, job)
else:
j.check_data_sources(data, job)
job_type, subtype = edp.split_job_type(job.type)
if job_type == edp.JOB_TYPE_MAPREDUCE and (
subtype == edp.JOB_SUBTYPE_STREAMING):
j.check_streaming_present(data, job)
@staticmethod
def get_possible_job_config(job_type):
return workflow_factory.get_possible_job_config(job_type)
@staticmethod
def get_supported_job_types():
return [edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_JAVA,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING,
edp.JOB_TYPE_PIG,
edp.JOB_TYPE_SHELL]
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
proxy_configs=None):
mains = job.mains or []
libs = job.libs or []
builtin_libs = edp.get_builtin_binaries(job, configs)
uploaded_paths = []
hdfs_user = self.get_hdfs_user()
job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
lib_dir = os.path.join(job_dir, job_dir_suffix)
with remote.get_remote(where) as r:
for main in mains:
raw_data = dispatch.get_raw_binary(
main, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
job_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, main.name,
job_dir, hdfs_user)
uploaded_paths.append(job_dir + '/' + main.name)
            if libs and job_dir_suffix:
# HDFS 2.2.0 fails to put file if the lib dir does not exist
self.create_hdfs_dir(r, lib_dir)
for lib in libs:
raw_data = dispatch.get_raw_binary(
                    lib, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
lib_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, lib.name,
lib_dir, hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib.name)
for lib in builtin_libs:
h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib['name'])
return uploaded_paths
def _create_hdfs_workflow_dir(self, where, job):
constructed_dir = '/user/%s/' % self.get_hdfs_user()
constructed_dir = self._add_postfix(constructed_dir)
constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid())
with remote.get_remote(where) as r:
self.create_hdfs_dir(r, constructed_dir)
return constructed_dir
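    # Illustrative sketch (hypothetical job and file names, not part of the
    # engine): for a Pig job called "wordcount" with one main script and one
    # lib binary, the two helpers above produce an HDFS layout roughly like
    #
    #     /user/<hdfs_user>/<postfix>/wordcount/<uuid>/script.pig
    #     /user/<hdfs_user>/<postfix>/wordcount/<uuid>/lib/udf.jar
    #
    # where <postfix> appears only when CONF.job_workflow_postfix is set, and
    # Shell jobs skip the lib/ subdirectory because job_dir_suffix is '' for
    # edp.JOB_TYPE_SHELL.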
def _create_coordinator_xml(self, coord_configs, config_filter=None,
appname='coord'):
doc = xml.Document()
# Create the <coordinator-app> base element
coord = doc.createElement('coordinator-app')
coord.attributes['name'] = appname
coord.attributes['start'] = "${start}"
coord.attributes['end'] = "${end}"
coord.attributes['frequency'] = "${frequency}"
coord.attributes['timezone'] = 'UTC'
coord.attributes['xmlns'] = 'uri:oozie:coordinator:0.2'
doc.appendChild(coord)
action = doc.createElement('action')
workflow = doc.createElement('workflow')
coord.appendChild(action)
action.appendChild(workflow)
x.add_text_element_to_tag(doc, "workflow", 'app-path',
"${workflowAppUri}")
configuration = doc.createElement('configuration')
workflow.appendChild(configuration)
default_configs = []
if config_filter is not None:
default_configs = [cfg['name'] for cfg in config_filter]
for name in sorted(coord_configs):
if name in default_configs or config_filter is None:
x.add_property_to_configuration(doc, name, coord_configs[name])
# Return newly created XML
return doc.toprettyxml(indent=" ")
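    # Illustrative sketch (expected output only, not part of the engine): for
    # the default coord_configs used by run_scheduled_job, the method above
    # renders a coordinator document along these lines (exact whitespace and
    # attribute order depend on toprettyxml):
    #
    #     <coordinator-app name="coord" start="${start}" end="${end}"
    #                      frequency="${frequency}" timezone="UTC"
    #                      xmlns="uri:oozie:coordinator:0.2">
    #       <action>
    #         <workflow>
    #           <app-path>${workflowAppUri}</app-path>
    #           <configuration>
    #             <property>
    #               <name>jobTracker</name>
    #               <value>${jobTracker}</value>
    #             </property>
    #             <property>
    #               <name>nameNode</name>
    #               <value>${nameNode}</value>
    #             </property>
    #           </configuration>
    #         </workflow>
    #       </action>
    #     </coordinator-app>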
def _add_postfix(self, constructed_dir):
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
constructed_dir = _append_slash_if_needed(constructed_dir)
if CONF.job_workflow_postfix:
constructed_dir = ''.join([str(constructed_dir),
str(CONF.job_workflow_postfix)])
return _append_slash_if_needed(constructed_dir)
def _resolve_external_hdfs_urls(self, job_configs):
external_hdfs_urls = []
for k, v in six.iteritems(job_configs.get('configs', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for k, v in six.iteritems(job_configs.get('params', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for v in job_configs.get('args', []):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
return external_hdfs_urls
def suspend_job(self, job_execution):
return self._manage_job(job_execution, edp.JOB_ACTION_SUSPEND)
def _manage_job(self, job_execution, action):
if job_execution.oozie_job_id is not None:
client = self.get_client()
if action == edp.JOB_ACTION_SUSPEND:
client.suspend_job(job_execution)
return client.get_job_status(job_execution)
|
|
"""Actors communicate with each other by sending and receiving messages.
The :mod:`pulsar.async.mailbox` module implements the message passing layer
via a bidirectional socket connection between the :class:`.Arbiter`
and any :class:`.Actor`.
Message sending is asynchronous and safe: the message is guaranteed to
eventually reach the recipient, provided that the recipient exists.
The implementation details are outlined below:
* Messages are sent via the :func:`.send` function, which is a proxy for
the actor :meth:`~.Actor.send` method.
Here is how you ping actor ``abc`` in a coroutine::
from pulsar.api import send
async def example():
result = await send('abc', 'ping')
* The :class:`.Arbiter` :attr:`~pulsar.Actor.mailbox` is a :class:`.TcpServer`
accepting connections from remote actors.
* The :attr:`.Actor.mailbox` is a :class:`.MailboxClient` of the arbiter
mailbox server.
* When an actor sends a message to another actor, the arbiter mailbox behaves
as a proxy server by routing the message to the targeted actor.
* Communication is bidirectional and there is **only one connection** between
the arbiter and any given actor.
* Messages are encoded and decoded using the unmasked websocket protocol
implemented in :func:`.frame_parser`.
* If, for some reason, the connection between an actor and the arbiter
  gets broken, the actor will eventually stop running and be garbage collected.
Implementation
=========================
For the curious, this is how the internal protocol is implemented.
Protocol
~~~~~~~~~~~~
.. autoclass:: MessageConsumer
:members:
:member-order: bysource
Client
~~~~~~~~~~~~
.. autoclass:: MailboxClient
:members:
:member-order: bysource
"""
import socket
import pickle
import logging
from functools import partial
from collections import namedtuple
from inspect import isawaitable
from ..utils.exceptions import CommandError
from ..utils.internet import nice_address
from ..utils.websocket import frame_parser
from ..utils.string import gen_unique_id
from ..utils.lib import ProtocolConsumer
from .protocols import Connection
from .access import get_actor
from .proxy import actor_identity, get_proxy, get_command, ActorProxy
from .clients import AbstractClient
CommandRequest = namedtuple('CommandRequest', 'actor caller connection')
LOGGER = logging.getLogger('pulsar.mailbox')
def create_aid():
return gen_unique_id()[:8]
async def command_in_context(command, caller, target, args, kwargs,
connection=None):
cmnd = get_command(command)
if not cmnd:
raise CommandError('unknown %s' % command)
request = CommandRequest(target, caller, connection)
result = cmnd(request, args, kwargs)
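    # A command may return either a plain value or an awaitable.  Awaiting a
    # plain value raises TypeError, which is swallowed below; it is re-raised
    # only when the result really was awaitable, i.e. the error came from the
    # command itself.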
try:
result = await result
except TypeError:
if isawaitable(result):
raise
return result
class ProxyMailbox:
'''A proxy for the arbiter :class:`Mailbox`.
'''
active_connections = 0
def __init__(self, actor):
mailbox = actor.monitor.mailbox
if isinstance(mailbox, ProxyMailbox):
mailbox = mailbox.mailbox
self.mailbox = mailbox
def __repr__(self):
return self.mailbox.__repr__()
def __str__(self):
return self.mailbox.__str__()
def __getattr__(self, name):
return getattr(self.mailbox, name)
def _run(self):
pass
def close(self):
pass
class MessageConsumer(ProtocolConsumer):
"""Protocol Consumer for Actor messages
"""
tasks = None
parser = None
worker = None
debug = False
pending_responses = None
def start_request(self):
actor = get_actor()
self.parser = frame_parser(kind=2)
self.pending_responses = {}
self.tasks = {}
self.logger = actor.logger
self.debug = actor.cfg.debug
def feed_data(self, data):
msg = self.parser.decode(data)
while msg:
try:
message = pickle.loads(msg.body)
except Exception:
self.logger.exception('could not decode message body')
else:
                # Avoid creating a task for callback messages
if message.get('command') == 'callback':
self._on_callback(message)
else:
task = self._loop.create_task(self._on_message(message))
self.tasks[message['id']] = task
msg = self.parser.decode()
def send(self, command, sender, target, args, kwargs):
"""Used by the server to send messages to the client.
Returns a future.
"""
command = get_command(command)
data = {'command': command.__name__,
'id': create_aid(),
'sender': actor_identity(sender),
'target': actor_identity(target),
'args': args if args is not None else (),
'kwargs': kwargs if kwargs is not None else {}}
waiter = self._loop.create_future()
ack = None
if command.ack:
ack = create_aid()
data['ack'] = ack
self.pending_responses[ack] = waiter
try:
self.write(data)
except Exception as exc:
waiter.set_exception(exc)
if ack:
self.pending_responses.pop(ack, None)
else:
if not ack:
waiter.set_result(None)
return waiter
def write(self, msg):
obj = pickle.dumps(msg, protocol=2)
data = self.parser.encode(obj, opcode=2)
try:
self.connection.write(data)
except (socket.error, RuntimeError):
actor = get_actor()
if actor.is_running() and not actor.is_arbiter():
self.logger.warning('Lost connection with arbiter')
self._loop.stop()
def _on_callback(self, message):
ack = message.get('ack')
if not ack:
self.logger.error('A callback without id')
else:
if self.debug:
self.logger.debug('Callback from "%s"', ack)
pending = self.pending_responses.pop(ack)
pending.set_result(message.get('result'))
async def _on_message(self, message):
try:
actor = get_actor()
command = message.get('command')
ack = message.get('ack')
try:
if self.debug:
self.logger.debug('Got message "%s"', command)
target = actor.get_actor(message['target'])
if target is None:
raise CommandError(
'cannot execute "%s", unknown actor '
'"%s"' % (command, message['target']))
# Get the caller proxy without throwing
caller = get_proxy(actor.get_actor(message['sender']),
safe=True)
if isinstance(target, ActorProxy):
# route the message to the actor proxy
if caller is None:
raise CommandError(
"'%s' got message from unknown '%s'"
% (actor, message['sender']))
result = await actor.send(target, command,
*message['args'],
**message['kwargs'])
else:
result = await command_in_context(command, caller,
target,
message['args'],
message['kwargs'],
self)
except CommandError as exc:
                self.logger.warning('Command error: %s', exc)
result = None
except Exception:
self.logger.exception('Unhandled exception')
result = None
if ack:
data = {'command': 'callback', 'result': result, 'ack': ack}
self.write(data)
finally:
self.tasks.pop(message['id'], None)
mailbox_protocol = partial(Connection, MessageConsumer)
class MailboxClient(AbstractClient):
"""Used by actors to send messages to other actors via the arbiter.
"""
def __init__(self, address, actor, loop):
super().__init__(mailbox_protocol, loop=loop,
name='%s-mailbox' % actor, logger=LOGGER)
self.address = address
self.connection = None
def connect(self):
return self.create_connection(self.address)
def __repr__(self):
return '%s %s' % (self.name, nice_address(self.address))
async def send(self, command, sender, target, args, kwargs):
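        # Lazily open the single bidirectional connection to the arbiter on
        # first use and reuse its consumer afterwards; a lost connection
        # stops this actor's event loop (see _lost below).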
if self.connection is None:
self.connection = await self.connect()
consumer = self.connection.current_consumer()
self.connection.event('connection_lost').bind(self._lost)
consumer.start()
else:
consumer = self.connection.current_consumer()
response = await consumer.send(command, sender, target, args, kwargs)
return response
def close(self):
if self.connection:
self.connection.abort()
def start_serving(self): # pragma nocover
pass
def _lost(self, _, exc=None):
# When the connection is lost, stop the event loop
if self._loop.is_running():
self._loop.stop()
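# Illustrative sketch (wire format as implemented above, not a public API):
# the payload that MessageConsumer.send() builds is a plain dict, e.g.
#
#     {'command': 'ping',
#      'id': 'a1b2c3d4',        # create_aid(): first 8 chars of a unique id
#      'sender': 'abc',         # actor_identity(sender)
#      'target': 'arbiter',     # actor_identity(target)
#      'args': (),
#      'kwargs': {},
#      'ack': 'e5f6a7b8'}       # present only when the command requires an ack
#
# MessageConsumer.write() pickles the dict (protocol 2) and wraps it in an
# unmasked binary websocket frame; feed_data() on the other side decodes it
# and, for ack-ed commands, replies with
# {'command': 'callback', 'result': ..., 'ack': <same ack id>}.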
|
|
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Interface for interacting with the Mycroft gui qml viewer. """
from os.path import join
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util import resolve_resource_file
class SkillGUI:
"""SkillGUI - Interface to the Graphical User Interface
Values set in this class are synced to the GUI, accessible within QML
via the built-in sessionData mechanism. For example, in Python you can
write in a skill:
self.gui['temp'] = 33
self.gui.show_page('Weather.qml')
Then in the Weather.qml you'd access the temp via code such as:
        text: sessionData.temp
"""
def __init__(self, skill):
self.__session_data = {} # synced to GUI for use by this skill's pages
self.page = None # the active GUI page (e.g. QML template) to show
self.skill = skill
self.on_gui_changed_callback = None
self.config = Configuration.get()
@property
def remote_url(self):
"""Returns configuration value for url of remote-server."""
return self.config.get('remote-server')
def build_message_type(self, event):
"""Builds a message matching the output from the enclosure."""
return '{}.{}'.format(self.skill.skill_id, event)
def setup_default_handlers(self):
"""Sets the handlers for the default messages."""
msg_type = self.build_message_type('set')
self.skill.add_event(msg_type, self.gui_set)
def register_handler(self, event, handler):
"""Register a handler for GUI events.
When using the triggerEvent method from Qt
triggerEvent("event", {"data": "cool"})
Arguments:
event (str): event to catch
handler: function to handle the event
"""
msg_type = self.build_message_type(event)
self.skill.add_event(msg_type, handler)
def set_on_gui_changed(self, callback):
"""Registers a callback function to run when a value is
changed from the GUI.
Arguments:
callback: Function to call when a value is changed
"""
self.on_gui_changed_callback = callback
def gui_set(self, message):
"""Handler catching variable changes from the GUI.
Arguments:
message: Messagebus message
"""
for key in message.data:
self[key] = message.data[key]
if self.on_gui_changed_callback:
self.on_gui_changed_callback()
def __setitem__(self, key, value):
"""Implements set part of dict-like behaviour with named keys."""
self.__session_data[key] = value
if self.page:
# emit notification (but not needed if page has not been shown yet)
data = self.__session_data.copy()
data.update({'__from': self.skill.skill_id})
self.skill.bus.emit(Message("gui.value.set", data))
def __getitem__(self, key):
"""Implements get part of dict-like behaviour with named keys."""
return self.__session_data[key]
def __contains__(self, key):
"""Implements the "in" operation."""
return self.__session_data.__contains__(key)
def clear(self):
"""Reset the value dictionary, and remove namespace from GUI."""
self.__session_data = {}
self.page = None
self.skill.bus.emit(Message("gui.clear.namespace",
{"__from": self.skill.skill_id}))
    def send_event(self, event_name, params=None):
        """Trigger a gui event.
        Arguments:
            event_name (str): name of event to be triggered
            params: json serializable object containing any parameters that
                    should be sent along with the request.
        """
        params = params if params is not None else {}
        self.skill.bus.emit(Message("gui.event.send",
                                    {"__from": self.skill.skill_id,
                                     "event_name": event_name,
                                     "params": params}))
def show_page(self, name, override_idle=None):
"""Begin showing the page in the GUI
Arguments:
name (str): Name of page (e.g "mypage.qml") to display
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
"""
self.show_pages([name], 0, override_idle)
def show_pages(self, page_names, index=0, override_idle=None):
"""Begin showing the list of pages in the GUI.
Arguments:
page_names (list): List of page names (str) to display, such as
["Weather.qml", "Forecast.qml", "Details.qml"]
index (int): Page number (0-based) to show initially. For the
above list a value of 1 would start on "Forecast.qml"
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
"""
if not isinstance(page_names, list):
raise ValueError('page_names must be a list')
        if index >= len(page_names):
raise ValueError('Default index is larger than page list length')
self.page = page_names[index]
# First sync any data...
data = self.__session_data.copy()
data.update({'__from': self.skill.skill_id})
self.skill.bus.emit(Message("gui.value.set", data))
# Convert pages to full reference
page_urls = []
for name in page_names:
if name.startswith("SYSTEM"):
page = resolve_resource_file(join('ui', name))
else:
page = self.skill.find_resource(name, 'ui')
if page:
if self.config.get('remote'):
page_urls.append(self.remote_url + "/" + page)
else:
page_urls.append("file://" + page)
else:
raise FileNotFoundError("Unable to find page: {}".format(name))
self.skill.bus.emit(Message("gui.page.show",
{"page": page_urls,
"index": index,
"__from": self.skill.skill_id,
"__idle": override_idle}))
def remove_page(self, page):
"""Remove a single page from the GUI.
Arguments:
page (str): Page to remove from the GUI
"""
return self.remove_pages([page])
def remove_pages(self, page_names):
"""Remove a list of pages in the GUI.
Arguments:
            page_names (list): List of page names (str) to remove, such as
["Weather.qml", "Forecast.qml", "Other.qml"]
"""
if not isinstance(page_names, list):
raise ValueError('page_names must be a list')
# Convert pages to full reference
page_urls = []
for name in page_names:
page = self.skill.find_resource(name, 'ui')
if page:
page_urls.append("file://" + page)
else:
raise FileNotFoundError("Unable to find page: {}".format(name))
self.skill.bus.emit(Message("gui.page.delete",
{"page": page_urls,
"__from": self.skill.skill_id}))
def show_text(self, text, title=None, override_idle=None):
"""Display a GUI page for viewing simple text.
Arguments:
text (str): Main text content. It will auto-paginate
title (str): A title to display above the text content.
"""
self.clear()
self["text"] = text
self["title"] = title
self.show_page("SYSTEM_TextFrame.qml", override_idle)
def show_image(self, url, caption=None,
title=None, fill=None,
override_idle=None):
"""Display a GUI page for viewing an image.
Arguments:
url (str): Pointer to the image
caption (str): A caption to show under the image
title (str): A title to display above the image content
fill (str): Fill type supports 'PreserveAspectFit',
'PreserveAspectCrop', 'Stretch'
"""
self.clear()
self["image"] = url
self["title"] = title
self["caption"] = caption
self["fill"] = fill
self.show_page("SYSTEM_ImageFrame.qml", override_idle)
def show_html(self, html, resource_url=None, override_idle=None):
"""Display an HTML page in the GUI.
Arguments:
html (str): HTML text to display
resource_url (str): Pointer to HTML resources
"""
self.clear()
self["html"] = html
self["resourceLocation"] = resource_url
self.show_page("SYSTEM_HtmlFrame.qml", override_idle)
def show_url(self, url, override_idle=None):
"""Display an HTML page in the GUI.
Arguments:
url (str): URL to render
"""
self.clear()
self["url"] = url
self.show_page("SYSTEM_UrlFrame.qml", override_idle)
def shutdown(self):
"""Shutdown gui interface.
        Clear pages loaded through this interface and remove the skill
        reference so reference-counting warnings are more precise.
"""
self.clear()
self.skill = None
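# Illustrative sketch (hypothetical skill code, not part of this module):
# typical use of SkillGUI from inside a skill, mirroring the class docstring.
#
#     class WeatherSkill(MycroftSkill):
#         def handle_weather(self, message):
#             self.gui['temp'] = 33              # synced via "gui.value.set"
#             self.gui.show_page('Weather.qml')  # emits "gui.page.show"
#
# Weather.qml can then read the value through the sessionData mechanism,
# e.g. ``text: sessionData.temp``.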
|