# -*- coding: utf-8 -*-
# pylint: disable=C,R,W
"""Utility functions used across Superset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
from datetime import date, datetime, time, timedelta
import decimal
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
import errno
import functools
import json
import logging
import os
import signal
import smtplib
import sys
import uuid
import zlib
import bleach
import celery
from dateutil.parser import parse
from flask import flash, Markup, render_template
from flask_babel import gettext as __
from flask_caching import Cache
import markdown as md
import numpy
import pandas as pd
import parsedatetime
from past.builtins import basestring
from pydruid.utils.having import Having
import pytz
import sqlalchemy as sa
from sqlalchemy import event, exc, select
from sqlalchemy.types import TEXT, TypeDecorator
from superset.exceptions import SupersetException, SupersetTimeoutException
logging.getLogger('MARKDOWN').setLevel(logging.INFO)
PY3K = sys.version_info >= (3, 0)
EPOCH = datetime(1970, 1, 1)
DTTM_ALIAS = '__timestamp'
ADHOC_METRIC_EXPRESSION_TYPES = {
'SIMPLE': 'SIMPLE',
'SQL': 'SQL',
}
JS_MAX_INTEGER = 9007199254740991  # Largest int JavaScript can handle (2^53 - 1)
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
class _memoized(object): # noqa
"""Decorator that caches a function's return value each time it is called
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
Define ``watch`` as a tuple of attribute names if this Decorator
should account for instance variable changes.
"""
def __init__(self, func, watch=()):
self.func = func
self.cache = {}
self.is_method = False
self.watch = watch
def __call__(self, *args, **kwargs):
key = [args, frozenset(kwargs.items())]
if self.is_method:
key.append(tuple([getattr(args[0], v, None) for v in self.watch]))
key = tuple(key)
if key in self.cache:
return self.cache[key]
try:
value = self.func(*args, **kwargs)
self.cache[key] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args, **kwargs)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
    def __get__(self, obj, objtype):
        """Support instance methods."""
        if not self.is_method:
            self.is_method = True
        return functools.partial(self.__call__, obj)
def memoized(func=None, watch=None):
if func:
return _memoized(func)
else:
def wrapper(f):
return _memoized(f, watch)
return wrapper
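# A minimal usage sketch with hypothetical names (not part of the original
# module): ``watch`` keys the cache on instance attributes in addition to the
# call arguments, so changing ``factor`` below yields a new cache entry.
class _MemoizedExample(object):  # hypothetical example class
    def __init__(self, factor):
        self.factor = factor

    @memoized(watch=('factor',))
    def scaled(self, value):
        # Re-evaluated only for unseen (value, factor) combinations.
        return value * self.factor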
def js_string_to_python(item):
return None if item in ('null', 'undefined') else item
def string_to_num(s):
"""Converts a string to an int/float
Returns ``None`` if it can't be converted
>>> string_to_num('5')
5
>>> string_to_num('5.2')
5.2
>>> string_to_num(10)
10
>>> string_to_num(10.1)
10.1
>>> string_to_num('this is not a string') is None
True
"""
if isinstance(s, (int, float)):
return s
if s.isdigit():
return int(s)
try:
return float(s)
except ValueError:
return None
class DimSelector(Having):
def __init__(self, **args):
# Just a hack to prevent any exceptions
Having.__init__(self, type='equalTo', aggregation=None, value=None)
self.having = {
'having': {
'type': 'dimSelector',
'dimension': args['dimension'],
'value': args['value'],
},
}
def list_minus(l, minus):
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
def dttm_from_timtuple(d):
return datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
def decode_dashboards(o):
"""
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
"""
import superset.models.core as models
from superset.connectors.sqla.models import (
SqlaTable, SqlMetric, TableColumn,
)
if '__Dashboard__' in o:
d = models.Dashboard()
d.__dict__.update(o['__Dashboard__'])
return d
elif '__Slice__' in o:
d = models.Slice()
d.__dict__.update(o['__Slice__'])
return d
elif '__TableColumn__' in o:
d = TableColumn()
d.__dict__.update(o['__TableColumn__'])
return d
elif '__SqlaTable__' in o:
d = SqlaTable()
d.__dict__.update(o['__SqlaTable__'])
return d
elif '__SqlMetric__' in o:
d = SqlMetric()
d.__dict__.update(o['__SqlMetric__'])
return d
elif '__datetime__' in o:
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
else:
return o
class DashboardEncoder(json.JSONEncoder):
# pylint: disable=E0202
def default(self, o):
try:
vals = {
k: v for k, v in o.__dict__.items() if k != '_sa_instance_state'}
return {'__{}__'.format(o.__class__.__name__): vals}
except Exception:
if type(o) == datetime:
return {'__datetime__': o.replace(microsecond=0).isoformat()}
return json.JSONEncoder.default(self, o)
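# A small round-trip sketch with hypothetical names: DashboardEncoder wraps
# datetimes as {'__datetime__': iso_string}, and decode_dashboards, passed as
# json.loads' object_hook, rebuilds them (and the ORM objects above) from
# those wrappers.
def _dashboard_json_roundtrip_example():
    payload = {'changed_on': datetime(2018, 1, 1, 12, 30)}
    blob = json.dumps(payload, cls=DashboardEncoder)
    # Returns {'changed_on': datetime(2018, 1, 1, 12, 30)} again
    # (microseconds are dropped by the encoder).
    return json.loads(blob, object_hook=decode_dashboards)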
def parse_human_timedelta(s):
"""
    Returns ``datetime.timedelta`` from natural language time deltas
    >>> parse_human_timedelta('now') == timedelta(0)
    True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s, dttm)[0]
d = datetime(d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
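# A brief attachment sketch (table and column names are illustrative): columns
# of this type store dicts as JSON text on bind and load them back into dicts
# on fetch.
_json_example_metadata = sa.MetaData()
_json_example_table = sa.Table(
    'json_encoded_example', _json_example_metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('params', JSONEncodedDict),
)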
def datetime_f(dttm):
"""Formats datetime to take less room when it is recent"""
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return '<nobr>{}</nobr>'.format(dttm)
def base_json_conv(obj):
if isinstance(obj, numpy.int64):
return int(obj)
elif isinstance(obj, numpy.bool_):
return bool(obj)
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, timedelta):
return str(obj)
elif isinstance(obj, bytes):
try:
return '{}'.format(obj)
except Exception:
return '[bytes]'
def json_iso_dttm_ser(obj, pessimistic=False):
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, date, time, pd.Timestamp)):
obj = obj.isoformat()
else:
if pessimistic:
return 'Unserializable [{}]'.format(type(obj))
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
def pessimistic_json_iso_dttm_ser(obj):
"""Proxy to call json_iso_dttm_ser in a pessimistic way
    If one of the objects is not JSON-serializable, the call still succeeds."""
return json_iso_dttm_ser(obj, pessimistic=True)
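# A short contrast sketch of the two serializers: the strict one raises for
# unknown types, while the pessimistic variant degrades to a placeholder
# string so the dump always succeeds.
def _pessimistic_serialization_example():
    try:
        json.dumps({'x': object()}, default=json_iso_dttm_ser)
    except TypeError:
        pass  # the strict serializer refuses plain objects
    # emits something like '{"x": "Unserializable [...]"}'
    return json.dumps({'x': object()}, default=pessimistic_json_iso_dttm_ser)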
def datetime_to_epoch(dttm):
if dttm.tzinfo:
epoch_with_tz = pytz.utc.localize(EPOCH)
return (dttm - epoch_with_tz).total_seconds() * 1000
return (dttm - EPOCH).total_seconds() * 1000
def now_as_float():
return datetime_to_epoch(datetime.utcnow())
def json_int_dttm_ser(obj):
"""json serializer that deals with dates"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, pd.Timestamp)):
obj = datetime_to_epoch(obj)
elif isinstance(obj, date):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
def json_dumps_w_dates(payload):
return json.dumps(payload, default=json_int_dttm_ser)
def error_msg_from_exception(e):
"""Translate exception into error message
    Databases have different ways of handling exceptions. This function
    attempts to make sense of the exception object and construct a human
    readable sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ''
if hasattr(e, 'message'):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
msg = '{}'.format(e.message)
return msg or '{}'.format(e)
def markdown(s, markup_wrap=False):
safe_markdown_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'b', 'i',
'strong', 'em', 'tt', 'p', 'br', 'span',
'div', 'blockquote', 'code', 'hr', 'ul', 'ol',
'li', 'dd', 'dt', 'img', 'a']
safe_markdown_attrs = {'img': ['src', 'alt', 'title'],
'a': ['href', 'alt', 'title']}
s = md.markdown(s or '', [
'markdown.extensions.tables',
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
])
s = bleach.clean(s, safe_markdown_tags, safe_markdown_attrs)
if markup_wrap:
s = Markup(s)
return s
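# A usage note (output shown loosely; exact markup may vary by library
# version): the rendered HTML is sanitized by bleach, so tags and attributes
# outside the whitelists above, e.g. <script>, are escaped rather than
# executed; pass markup_wrap=True to get a Jinja-safe Markup object instead of
# a plain string.
#     markdown('**bold**')                 # -> '<p><strong>bold</strong></p>'
#     markdown('hello', markup_wrap=True)  # -> Markup object safe to render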
def readfile(file_path):
with open(file_path) as f:
content = f.read()
return content
def generic_find_constraint_name(table, columns, referenced, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for fk in t.foreign_key_constraints:
if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
return fk.name
def generic_find_fk_constraint_name(table, columns, referenced, insp):
"""Utility to find a foreign-key constraint name in alembic migrations"""
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
return fk['name']
def generic_find_fk_constraint_names(table, columns, referenced, insp):
"""Utility to find foreign-key constraint names in alembic migrations"""
names = set()
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
names.add(fk['name'])
return names
def generic_find_uq_constraint_name(table, columns, insp):
"""Utility to find a unique constraint name in alembic migrations"""
for uq in insp.get_unique_constraints(table):
if columns == set(uq['column_names']):
return uq['name']
def get_datasource_full_name(database_name, datasource_name, schema=None):
if not schema:
return '[{}].[{}]'.format(database_name, datasource_name)
return '[{}].[{}].[{}]'.format(database_name, schema, datasource_name)
def validate_json(obj):
if obj:
try:
json.loads(obj)
except Exception:
raise SupersetException('JSON is not valid')
def table_has_constraint(table, name, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for c in t.constraints:
if c.name == name:
return True
return False
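# A hedged sketch of how these helpers are typically used inside an alembic
# migration (table, column and constraint names below are illustrative, and
# the alembic import is assumed to be available in that context). Note that
# the ``columns`` argument of generic_find_fk_constraint_name is compared
# against the *referred* columns of the foreign key.
#
#     from alembic import op
#     insp = sa.engine.reflection.Inspector.from_engine(op.get_bind())
#     fk_name = generic_find_fk_constraint_name('tables', {'id'}, 'dbs', insp)
#     if fk_name:
#         op.drop_constraint(fk_name, 'tables', type_='foreignkey')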
class timeout(object):
"""
    To be used in a ``with`` block to time out its contents.
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
logging.error('Process timed out')
raise SupersetTimeoutException(self.error_message)
def __enter__(self):
try:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
def __exit__(self, type, value, traceback):
try:
signal.alarm(0)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
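# A minimal usage sketch: the body is interrupted by SIGALRM after ``seconds``
# and SupersetTimeoutException is raised. This is signal-based, so it only
# works in the main thread; elsewhere __enter__ just logs a warning.
def _timeout_usage_example():
    import time  # local import, example only
    try:
        with timeout(seconds=1, error_message='example timed out'):
            time.sleep(2)  # longer than the allowed window
    except SupersetTimeoutException:
        return True
    return False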
def pessimistic_connection_handling(some_engine):
@event.listens_for(some_engine, 'engine_connect')
def ping_connection(connection, branch):
if branch:
# 'branch' refers to a sub-connection of a connection,
# we don't want to bother pinging on these.
return
        # turn off 'close with result'. This flag is only used with
        # 'connectionless' execution; otherwise it will be False in any case.
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# the SELECT of a scalar value without a table is
# appropriately formatted for the backend
connection.scalar(select([1]))
except exc.DBAPIError as err:
# catch SQLAlchemy's DBAPIError, which is a wrapper
# for the DBAPI's exception. It includes a .connection_invalidated
# attribute which specifies if this connection is a 'disconnect'
# condition, which is based on inspection of the original exception
# by the dialect in use.
if err.connection_invalidated:
# run the same SELECT again - the connection will re-validate
# itself and establish a new connection. The disconnect detection
# here also causes the whole connection pool to be invalidated
# so that all stale connections are discarded.
connection.scalar(select([1]))
else:
raise
finally:
# restore 'close with result'
connection.should_close_with_result = save_should_close_with_result
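# A wiring sketch (the connection URI is illustrative): attach the listener to
# an engine so every logical connect runs ``SELECT 1`` first and transparently
# re-establishes connections the pool has let go stale.
#
#     engine = sa.create_engine('postgresql://localhost/superset')
#     pessimistic_connection_handling(engine)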
class QueryStatus(object):
"""Enum-type class for query statuses"""
STOPPED = 'stopped'
FAILED = 'failed'
PENDING = 'pending'
RUNNING = 'running'
SCHEDULED = 'scheduled'
SUCCESS = 'success'
TIMED_OUT = 'timed_out'
def notify_user_about_perm_udate(
granter, user, role, datasource, tpl_name, config):
msg = render_template(tpl_name, granter=granter, user=user, role=role,
datasource=datasource)
logging.info(msg)
subject = __('[Superset] Access to the datasource %(name)s was granted',
name=datasource.full_name)
send_email_smtp(user.email, subject, msg, config, bcc=granter.email,
dryrun=not config.get('EMAIL_NOTIFICATIONS'))
def send_email_smtp(to, subject, html_content, config, files=None,
dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
"""
    Send an email with HTML content, e.g.:
    send_email_smtp(
        '[email protected]', 'foo', '<b>Foo</b> bar', config,
        files=['/dev/null'], dryrun=True)
"""
smtp_mail_from = config.get('SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ', '.join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ', '.join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html')
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, 'rb') as f:
msg.attach(
MIMEApplication(
f.read(),
Content_Disposition="attachment; filename='%s'" % basename,
Name=basename))
send_MIME_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
def send_MIME_email(e_from, e_to, mime_msg, config, dryrun=False):
SMTP_HOST = config.get('SMTP_HOST')
SMTP_PORT = config.get('SMTP_PORT')
SMTP_USER = config.get('SMTP_USER')
SMTP_PASSWORD = config.get('SMTP_PASSWORD')
SMTP_STARTTLS = config.get('SMTP_STARTTLS')
SMTP_SSL = config.get('SMTP_SSL')
if not dryrun:
s = smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) if SMTP_SSL else \
smtplib.SMTP(SMTP_HOST, SMTP_PORT)
if SMTP_STARTTLS:
s.starttls()
if SMTP_USER and SMTP_PASSWORD:
s.login(SMTP_USER, SMTP_PASSWORD)
logging.info('Sent an alert email to ' + str(e_to))
s.sendmail(e_from, e_to, mime_msg.as_string())
s.quit()
else:
logging.info('Dryrun enabled, email notification content is below:')
logging.info(mime_msg.as_string())
def get_email_address_list(address_string):
if isinstance(address_string, basestring):
if ',' in address_string:
address_string = address_string.split(',')
elif ';' in address_string:
address_string = address_string.split(';')
else:
address_string = [address_string]
return address_string
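# A dry-run usage sketch (addresses and config values are illustrative): with
# dryrun=True the assembled MIME message is only logged, so no SMTP server is
# needed. ``to``/``cc``/``bcc`` may also be comma- or semicolon-separated
# strings thanks to get_email_address_list above.
#
#     send_email_smtp(
#         '[email protected],[email protected]',
#         'Report ready',
#         '<b>done</b>',
#         config={'SMTP_MAIL_FROM': '[email protected]'},
#         dryrun=True)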
def choicify(values):
"""Takes an iterable and makes an iterable of tuples with it"""
return [(v, v) for v in values]
def setup_cache(app, cache_config):
"""Setup the flask-cache on a flask app"""
if cache_config and cache_config.get('CACHE_TYPE') != 'null':
return Cache(app, config=cache_config)
def zlib_compress(data):
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if PY3K:
if isinstance(data, str):
return zlib.compress(bytes(data, 'utf-8'))
return zlib.compress(data)
return zlib.compress(data)
def zlib_decompress_to_string(blob):
"""
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress_to_string(blob)
>>> got_str == json_str
True
"""
if PY3K:
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
decompressed = zlib.decompress(bytes(blob, 'utf-8'))
return decompressed.decode('utf-8')
return zlib.decompress(blob)
_celery_app = None
def get_celery_app(config):
global _celery_app
if _celery_app:
return _celery_app
_celery_app = celery.Celery(config_source=config.get('CELERY_CONFIG'))
return _celery_app
def merge_extra_filters(form_data):
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
if 'extra_filters' in form_data:
        # __from and __to are special extra_filters that target time
        # boundaries. The rest of extra_filters are simple
        # [column_name in list_of_values]. The `__` prefix is there to avoid
        # potential conflicts with columns that would be named `from` or `to`.
if 'filters' not in form_data:
form_data['filters'] = []
date_options = {
'__from': 'since',
'__to': 'until',
'__time_col': 'granularity_sqla',
'__time_grain': 'time_grain_sqla',
'__time_origin': 'druid_time_origin',
'__granularity': 'granularity',
}
# Grab list of existing filters 'keyed' on the column and operator
def get_filter_key(f):
return f['col'] + '__' + f['op']
existing_filters = {}
for existing in form_data['filters']:
if existing['col'] is not None and existing['val'] is not None:
existing_filters[get_filter_key(existing)] = existing['val']
for filtr in form_data['extra_filters']:
# Pull out time filters/options and merge into form data
if date_options.get(filtr['col']):
if filtr.get('val'):
form_data[date_options[filtr['col']]] = filtr['val']
elif filtr['val'] and len(filtr['val']):
# Merge column filters
filter_key = get_filter_key(filtr)
if filter_key in existing_filters:
# Check if the filter already exists
if isinstance(filtr['val'], list):
if isinstance(existing_filters[filter_key], list):
# Add filters for unequal lists
# order doesn't matter
if (
sorted(existing_filters[filter_key]) !=
sorted(filtr['val'])
):
form_data['filters'] += [filtr]
else:
form_data['filters'] += [filtr]
else:
# Do not add filter if same value already exists
if filtr['val'] != existing_filters[filter_key]:
form_data['filters'] += [filtr]
else:
# Filter not found, add it
form_data['filters'] += [filtr]
# Remove extra filters from the form data since no longer needed
del form_data['extra_filters']
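# A small before/after sketch of the merge (field values are illustrative):
# time-related extra filters are hoisted onto dedicated form_data keys, while
# plain column filters are appended to form_data['filters'], and the
# 'extra_filters' key is removed.
def _merge_extra_filters_example():
    form_data = {
        'extra_filters': [
            {'col': '__from', 'op': 'in', 'val': '7 days ago'},
            {'col': 'country', 'op': 'in', 'val': ['US', 'CA']},
        ],
    }
    merge_extra_filters(form_data)
    # form_data is now:
    # {'filters': [{'col': 'country', 'op': 'in', 'val': ['US', 'CA']}],
    #  'since': '7 days ago'}
    return form_data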
def merge_request_params(form_data, params):
url_params = {}
for key, value in params.items():
if key in ('form_data', 'r'):
continue
url_params[key] = value
form_data['url_params'] = url_params
def get_update_perms_flag():
val = os.environ.get('SUPERSET_UPDATE_PERMS')
return val.lower() not in ('0', 'false', 'no') if val else True
def user_label(user):
"""Given a user ORM FAB object, returns a label"""
if user:
if user.first_name and user.last_name:
return user.first_name + ' ' + user.last_name
else:
return user.username
def get_or_create_main_db():
from superset import conf, db
from superset.models import core as models
logging.info('Creating database reference')
dbobj = (
db.session.query(models.Database)
.filter_by(database_name='main')
.first())
if not dbobj:
dbobj = models.Database(database_name='main')
dbobj.set_sqlalchemy_uri(conf.get('SQLALCHEMY_DATABASE_URI'))
dbobj.expose_in_sqllab = True
dbobj.allow_run_sync = True
db.session.add(dbobj)
db.session.commit()
return dbobj
def is_adhoc_metric(metric):
return (
isinstance(metric, dict) and
(
(
metric['expressionType'] == ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE'] and
metric['column'] and
metric['aggregate']
) or
(
metric['expressionType'] == ADHOC_METRIC_EXPRESSION_TYPES['SQL'] and
metric['sqlExpression']
)
) and
metric['label']
)
def get_metric_name(metric):
return metric['label'] if is_adhoc_metric(metric) else metric
def get_metric_names(metrics):
return [get_metric_name(metric) for metric in metrics]
def ensure_path_exists(path):
try:
os.makedirs(path)
except OSError as exc:
if not (os.path.isdir(path) and exc.errno == errno.EEXIST):
raise
def split_adhoc_filters_into_base_filters(fd):
"""
    Mutates form data to restructure the adhoc filters into the four base
    filters: `where`, `having`, `filters`, and `having_filters`, which represent
    free-form WHERE SQL, free-form HAVING SQL, structured WHERE clauses and
    structured HAVING clauses, respectively.
"""
adhoc_filters = fd.get('adhoc_filters', None)
if isinstance(adhoc_filters, list):
simple_where_filters = []
simple_having_filters = []
sql_where_filters = []
sql_having_filters = []
for adhoc_filter in adhoc_filters:
expression_type = adhoc_filter.get('expressionType')
clause = adhoc_filter.get('clause')
if expression_type == 'SIMPLE':
if clause == 'WHERE':
simple_where_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif clause == 'HAVING':
simple_having_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif expression_type == 'SQL':
if clause == 'WHERE':
sql_where_filters.append(adhoc_filter.get('sqlExpression'))
elif clause == 'HAVING':
sql_having_filters.append(adhoc_filter.get('sqlExpression'))
fd['where'] = ' AND '.join(['({})'.format(sql) for sql in sql_where_filters])
fd['having'] = ' AND '.join(['({})'.format(sql) for sql in sql_having_filters])
fd['having_filters'] = simple_having_filters
fd['filters'] = simple_where_filters
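# A short worked example (values are illustrative) of the split described
# above: SIMPLE clauses become structured filters, while SQL clauses are
# AND-joined into free-form where/having strings.
def _split_adhoc_filters_example():
    fd = {
        'adhoc_filters': [
            {'expressionType': 'SIMPLE', 'clause': 'WHERE',
             'subject': 'gender', 'operator': '==', 'comparator': 'girl'},
            {'expressionType': 'SQL', 'clause': 'HAVING',
             'sqlExpression': 'COUNT(*) > 10'},
        ],
    }
    split_adhoc_filters_into_base_filters(fd)
    # fd now contains:
    #   fd['filters'] == [{'col': 'gender', 'op': '==', 'val': 'girl'}]
    #   fd['having'] == '(COUNT(*) > 10)'
    #   fd['where'] == '' and fd['having_filters'] == []
    return fd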
import os
import numpy.testing as npt
from nose import SkipTest
from nose.tools import raises
import numpy as np
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.kde import KDEUnivariate as KDE
import statsmodels.sandbox.nonparametric.kernels as kernels
from scipy import stats
# get results from Stata
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir,'results','results_kde.csv')
#print rfname
KDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
rfname = os.path.join(curdir,'results','results_kde_univ_weights.csv')
KDEWResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# get results from R
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir,'results','results_kcde.csv')
#print rfname
KCDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# setup test data
np.random.seed(12345)
Xi = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
class TestKDEExceptions(object):
@classmethod
def setupClass(cls):
cls.kde = KDE(Xi)
cls.weights_200 = np.linspace(1, 100, 200)
cls.weights_100 = np.linspace(1, 100, 100)
@raises(ValueError)
def test_check_is_fit_exception(self):
self.kde.evaluate(0)
@raises(NotImplementedError)
def test_non_weighted_fft_exception(self):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_200, fft=True,
bw="silverman")
@raises(ValueError)
def test_wrong_weight_length_exception(self):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_100, fft=False,
bw="silverman")
@raises(NotImplementedError)
def test_non_gaussian_fft_exception(self):
self.kde.fit(kernel="epa", gridsize=50, fft=True,
bw="silverman")
class CheckKDE(object):
decimal_density = 7
def test_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
# disable test
# fails for Epan, Triangular and Biweight, only Gaussian is correct
# added it as test method to TestKDEGauss below
# inDomain is not vectorized
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGauss(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["gau_d"]
def test_evaluate(self):
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
# The following tests are regression tests
# Values have been checked to be very close to R 'ks' package (Dec 2013)
def test_support_gridded(self):
kde = self.res1
support = KCDEResults['gau_support']
npt.assert_allclose(support, kde.support)
def test_cdf_gridded(self):
kde = self.res1
cdf = KCDEResults['gau_cdf']
npt.assert_allclose(cdf, kde.cdf)
def test_sf_gridded(self):
kde = self.res1
sf = KCDEResults['gau_sf']
npt.assert_allclose(sf, kde.sf)
def test_icdf_gridded(self):
kde = self.res1
icdf = KCDEResults['gau_icdf']
npt.assert_allclose(icdf, kde.icdf)
class TestKDEEpanechnikov(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="epa", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["epa2_d"]
class TestKDETriangular(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="tri", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["tri_d"]
class TestKDEBiweight(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="biw", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["biw_d"]
#NOTE: This is a knownfailure due to a definitional difference of Cosine kernel
#class TestKDECosine(CheckKDE):
# @classmethod
# def setupClass(cls):
# res1 = KDE(Xi)
# res1.fit(kernel="cos", fft=False, bw="silverman")
# cls.res1 = res1
# cls.res_density = KDEResults["cos_d"]
#weighted estimates taken from matlab so we can allow len(weights) != gridsize
class TestKdeWeights(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
weights = np.linspace(1,100,200)
res1.fit(kernel="gau", gridsize=50, weights=weights, fft=False,
bw="silverman")
cls.res1 = res1
rfname = os.path.join(curdir,'results','results_kde_weights.csv')
cls.res_density = np.genfromtxt(open(rfname, 'rb'), skip_header=1)
def test_evaluate(self):
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGaussFFT(CheckKDE):
@classmethod
def setupClass(cls):
cls.decimal_density = 2 # low accuracy because binning is different
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=True, bw="silverman")
cls.res1 = res1
rfname2 = os.path.join(curdir,'results','results_kde_fft.csv')
cls.res_density = np.genfromtxt(open(rfname2, 'rb'))
class CheckKDEWeights(object):
@classmethod
def setupClass(cls):
cls.x = x = KDEWResults['x']
weights = KDEWResults['weights']
res1 = KDE(x)
        # default bandwidth was "scott" when the reference values were computed
res1.fit(kernel=cls.kernel_name, weights=weights, fft=False, bw="scott")
cls.res1 = res1
cls.res_density = KDEWResults[cls.res_kernel_name]
decimal_density = 7
def t_est_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
if self.kernel_name == 'cos':
raise SkipTest("Cosine kernel fails against Stata")
kde_vals = [self.res1.evaluate(xi) for xi in self.x]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
def test_compare(self):
xx = self.res1.support
kde_vals = [self.res1.evaluate(xi) for xi in xx]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(self.res1.density, kde_vals,
self.decimal_density)
# regression test, not compared to another package
nobs = len(self.res1.endog)
kern = self.res1.kernel
v = kern.density_var(kde_vals, nobs)
v_direct = kde_vals * kern.L2Norm / kern.h / nobs
npt.assert_allclose(v, v_direct, rtol=1e-10)
ci = kern.density_confint(kde_vals, nobs)
crit = 1.9599639845400545 #stats.norm.isf(0.05 / 2)
hw = kde_vals - ci[:, 0]
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
hw = ci[:, 1] - kde_vals
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
def test_kernel_constants(self):
kern = self.res1.kernel
nc = kern.norm_const
# trigger numerical integration
kern._norm_const = None
nc2 = kern.norm_const
npt.assert_allclose(nc, nc2, rtol=1e-10)
l2n = kern.L2Norm
# trigger numerical integration
kern._L2Norm = None
l2n2 = kern.L2Norm
npt.assert_allclose(l2n, l2n2, rtol=1e-10)
v = kern.kernel_var
# trigger numerical integration
kern._kernel_var = None
v2 = kern.kernel_var
npt.assert_allclose(v, v2, rtol=1e-10)
class TestKDEWGauss(CheckKDEWeights):
kernel_name = "gau"
res_kernel_name = "x_gau_wd"
class TestKDEWEpa(CheckKDEWeights):
kernel_name = "epa"
res_kernel_name = "x_epan2_wd"
class TestKDEWTri(CheckKDEWeights):
kernel_name = "tri"
res_kernel_name = "x_" + kernel_name + "_wd"
class TestKDEWBiw(CheckKDEWeights):
kernel_name = "biw"
res_kernel_name = "x_bi_wd"
class TestKDEWCos(CheckKDEWeights):
kernel_name = "cos"
res_kernel_name = "x_cos_wd"
class TestKDEWCos2(CheckKDEWeights):
kernel_name = "cos2"
res_kernel_name = "x_cos_wd"
class T_estKDEWRect(CheckKDEWeights):
#TODO in docstring but not in kernel_switch
kernel_name = "rect"
res_kernel_name = "x_rec_wd"
class T_estKDEWPar(CheckKDEWeights):
# TODO in docstring but not implemented in kernels
kernel_name = "par"
res_kernel_name = "x_par_wd"
class TestKdeRefit():
np.random.seed(12345)
data1 = np.random.randn(100) * 100
pdf = KDE(data1)
pdf.fit()
data2 = np.random.randn(100) * 100
pdf2 = KDE(data2)
pdf2.fit()
for attr in ['icdf', 'cdf', 'sf']:
npt.assert_(not np.allclose(getattr(pdf, attr)[:10],
getattr(pdf2, attr)[:10]))
class TestNormConstant():
def test_norm_constant_calculation(self):
custom_gauss = kernels.CustomKernel(lambda x: np.exp(-x**2/2.0))
gauss_true_const = 0.3989422804014327
npt.assert_almost_equal(gauss_true_const, custom_gauss.norm_const)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
import simuPOP as sim
from simuPOP.utils import Exporter
Wild=sim.Population(
size=[1000,1000],
ploidy=2,
loci=[17,2,11,5,7,5,3,3,14,15,5,8,16,13,6,6,2,8,4,6,6,5,3,9,5,8,4,4],
lociPos=[
4.9,21.4,23.9,26.0,34.1,34.4,48.9,50.8,78.6,82.4,118.2,119.9,120.1,122.3,131.1,136.1,151.3,
24.7,43.3,
28.1,28.6,32.1,37.9,40.3,59.6,63.6,71.9,74.8,77.4,91.8,
9.0,20.0,47.5,47.7,55.8,
22.3,22.7,23.7,34.7,34.8,39.9,54.5,
35.9,38.9,55.9,59.8,68.0,
5.5,18.0,47.0,
18.5,19.8,22.0,
18.2,18.3,31.5,47.8,70.2,84.4,94.4,96.3,103.9,110.5,111.5,120.1,128.7,139.0,
9.3,17.8,19.3,20.8,24.2,29.1,31.7,37.0,64.1,67.9,74.6,74.9,75.0,75.4,113.0,
5.7,14.5,19.4,23.2,57.6,
26.4,34.2,34.3,36.9,61.5,62.5,66.6,78.4,
9.3,12.5,20.1,22.2,23.2,27.6,44.8,53.1,53.9,55.8,71.0,75.0,75.1,75.2,76.5,85.5,
14.8,15.2,23.9,34.1,37.0,54.3,54.6,55.4,60.7,61.1,64.8,80.1,82.3,
5.5,40.9,41.9,48.7,52.8,60.0,
16.9,42.0,68.1,69.0,71.0,81.9,
27.0,50.8,
13.9,23.3,24.2,27.1,28.2,53.6,56.3,66.4,
11.4,51.2,51.4,72.5,
33.0,33.8,47.4,50.5,63.1,69.1,
14.4,20.0,20.1,38.8,43.6,51.6,
6.5,7.5,18.4,20.1,44.2,
30.8,33.6,40.2,
1.3,1.4,11.1,12.1,12.4,17.0,19.8,34.4,47.0,
11.9,14.9,34.1,34.5,47.1,
13.5,19.8,21.0,23.9,25.8,26.4,27.0,28.9,
16.1,21.4,22.6,26.2,
8.6,13.2,28.1,35.8
],
ancGen=3,
alleleNames=['1','2'],
subPopNames=['escapee','wild'],
infoFields=['migrate_to','ind_id','father_id', 'mother_id']
)
sim.initSex(Wild)
Wild.evolve(
initOps=[
sim.InitSex(),
sim.IdTagger(),
sim.InitGenotype(subPops=[0] , loci=[0], freq=[0.515,0.485]),
sim.InitGenotype(subPops=[0] , loci=[1], freq=[0.617,0.383]),
sim.InitGenotype(subPops=[0] , loci=[2], freq=[0.37,0.63]),
sim.InitGenotype(subPops=[0] , loci=[3], freq=[0.447,0.553]),
sim.InitGenotype(subPops=[0] , loci=[4], freq=[0.485,0.515]),
sim.InitGenotype(subPops=[0] , loci=[5], freq=[0.599,0.401]),
sim.InitGenotype(subPops=[0] , loci=[6], freq=[0.432,0.568]),
sim.InitGenotype(subPops=[0] , loci=[7], freq=[0.495,0.505]),
sim.InitGenotype(subPops=[0] , loci=[8], freq=[0.385,0.615]),
sim.InitGenotype(subPops=[0] , loci=[9], freq=[0.576,0.424]),
sim.InitGenotype(subPops=[0] , loci=[10], freq=[0.528,0.472]),
sim.InitGenotype(subPops=[0] , loci=[11], freq=[0.152,0.848]),
sim.InitGenotype(subPops=[0] , loci=[12], freq=[0.443,0.557]),
sim.InitGenotype(subPops=[0] , loci=[13], freq=[0.532,0.468]),
sim.InitGenotype(subPops=[0] , loci=[14], freq=[0.548,0.452]),
sim.InitGenotype(subPops=[0] , loci=[15], freq=[0.544,0.456]),
sim.InitGenotype(subPops=[0] , loci=[16], freq=[0.545,0.455]),
sim.InitGenotype(subPops=[0] , loci=[17], freq=[0.564,0.436]),
sim.InitGenotype(subPops=[0] , loci=[18], freq=[0.367,0.633]),
sim.InitGenotype(subPops=[0] , loci=[19], freq=[0.273,0.727]),
sim.InitGenotype(subPops=[0] , loci=[20], freq=[0.473,0.527]),
sim.InitGenotype(subPops=[0] , loci=[21], freq=[0.535,0.465]),
sim.InitGenotype(subPops=[0] , loci=[22], freq=[0.597,0.403]),
sim.InitGenotype(subPops=[0] , loci=[23], freq=[0.552,0.448]),
sim.InitGenotype(subPops=[0] , loci=[24], freq=[0.516,0.484]),
sim.InitGenotype(subPops=[0] , loci=[25], freq=[0.544,0.456]),
sim.InitGenotype(subPops=[0] , loci=[26], freq=[0.583,0.417]),
sim.InitGenotype(subPops=[0] , loci=[27], freq=[0.495,0.505]),
sim.InitGenotype(subPops=[0] , loci=[28], freq=[0.473,0.527]),
sim.InitGenotype(subPops=[0] , loci=[29], freq=[0.286,0.714]),
sim.InitGenotype(subPops=[0] , loci=[30], freq=[0.552,0.448]),
sim.InitGenotype(subPops=[0] , loci=[31], freq=[0.667,0.333]),
sim.InitGenotype(subPops=[0] , loci=[32], freq=[0.333,0.667]),
sim.InitGenotype(subPops=[0] , loci=[33], freq=[0.466,0.534]),
sim.InitGenotype(subPops=[0] , loci=[34], freq=[0.492,0.508]),
sim.InitGenotype(subPops=[0] , loci=[35], freq=[0.496,0.504]),
sim.InitGenotype(subPops=[0] , loci=[36], freq=[0.604,0.396]),
sim.InitGenotype(subPops=[0] , loci=[37], freq=[0.325,0.675]),
sim.InitGenotype(subPops=[0] , loci=[38], freq=[0.465,0.535]),
sim.InitGenotype(subPops=[0] , loci=[39], freq=[0.459,0.541]),
sim.InitGenotype(subPops=[0] , loci=[40], freq=[0.511,0.489]),
sim.InitGenotype(subPops=[0] , loci=[41], freq=[0.605,0.395]),
sim.InitGenotype(subPops=[0] , loci=[42], freq=[0.478,0.522]),
sim.InitGenotype(subPops=[0] , loci=[43], freq=[0.445,0.555]),
sim.InitGenotype(subPops=[0] , loci=[44], freq=[0.53,0.47]),
sim.InitGenotype(subPops=[0] , loci=[45], freq=[0.522,0.478]),
sim.InitGenotype(subPops=[0] , loci=[46], freq=[0.721,0.279]),
sim.InitGenotype(subPops=[0] , loci=[47], freq=[0.59,0.41]),
sim.InitGenotype(subPops=[0] , loci=[48], freq=[0.528,0.472]),
sim.InitGenotype(subPops=[0] , loci=[49], freq=[0.407,0.593]),
sim.InitGenotype(subPops=[0] , loci=[50], freq=[0.554,0.446]),
sim.InitGenotype(subPops=[0] , loci=[51], freq=[0.524,0.476]),
sim.InitGenotype(subPops=[0] , loci=[52], freq=[0.056,0.944]),
sim.InitGenotype(subPops=[0] , loci=[53], freq=[0.435,0.565]),
sim.InitGenotype(subPops=[0] , loci=[54], freq=[0.46,0.54]),
sim.InitGenotype(subPops=[0] , loci=[55], freq=[0.587,0.413]),
sim.InitGenotype(subPops=[0] , loci=[56], freq=[0.331,0.669]),
sim.InitGenotype(subPops=[0] , loci=[57], freq=[0.47,0.53]),
sim.InitGenotype(subPops=[0] , loci=[58], freq=[0.366,0.634]),
sim.InitGenotype(subPops=[0] , loci=[59], freq=[0.367,0.633]),
sim.InitGenotype(subPops=[0] , loci=[60], freq=[0.506,0.494]),
sim.InitGenotype(subPops=[0] , loci=[61], freq=[0.678,0.322]),
sim.InitGenotype(subPops=[0] , loci=[62], freq=[0.489,0.511]),
sim.InitGenotype(subPops=[0] , loci=[63], freq=[0.524,0.476]),
sim.InitGenotype(subPops=[0] , loci=[64], freq=[0.66,0.34]),
sim.InitGenotype(subPops=[0] , loci=[65], freq=[0.735,0.265]),
sim.InitGenotype(subPops=[0] , loci=[66], freq=[0.59,0.41]),
sim.InitGenotype(subPops=[0] , loci=[67], freq=[0.47,0.53]),
sim.InitGenotype(subPops=[0] , loci=[68], freq=[0.442,0.558]),
sim.InitGenotype(subPops=[0] , loci=[69], freq=[0.461,0.539]),
sim.InitGenotype(subPops=[0] , loci=[70], freq=[0.486,0.514]),
sim.InitGenotype(subPops=[0] , loci=[71], freq=[0.544,0.456]),
sim.InitGenotype(subPops=[0] , loci=[72], freq=[0.125,0.875]),
sim.InitGenotype(subPops=[0] , loci=[73], freq=[0.405,0.595]),
sim.InitGenotype(subPops=[0] , loci=[74], freq=[0.432,0.568]),
sim.InitGenotype(subPops=[0] , loci=[75], freq=[0.526,0.474]),
sim.InitGenotype(subPops=[0] , loci=[76], freq=[0.523,0.477]),
sim.InitGenotype(subPops=[0] , loci=[77], freq=[0.337,0.663]),
sim.InitGenotype(subPops=[0] , loci=[78], freq=[0.558,0.442]),
sim.InitGenotype(subPops=[0] , loci=[79], freq=[0.45,0.55]),
sim.InitGenotype(subPops=[0] , loci=[80], freq=[0.442,0.558]),
sim.InitGenotype(subPops=[0] , loci=[81], freq=[0.489,0.511]),
sim.InitGenotype(subPops=[0] , loci=[82], freq=[0.723,0.277]),
sim.InitGenotype(subPops=[0] , loci=[83], freq=[0.465,0.535]),
sim.InitGenotype(subPops=[0] , loci=[84], freq=[0.518,0.482]),
sim.InitGenotype(subPops=[0] , loci=[85], freq=[0.676,0.324]),
sim.InitGenotype(subPops=[0] , loci=[86], freq=[0.591,0.409]),
sim.InitGenotype(subPops=[0] , loci=[87], freq=[0.556,0.444]),
sim.InitGenotype(subPops=[0] , loci=[88], freq=[0.499,0.501]),
sim.InitGenotype(subPops=[0] , loci=[89], freq=[0.5,0.5]),
sim.InitGenotype(subPops=[0] , loci=[90], freq=[0.487,0.513]),
sim.InitGenotype(subPops=[0] , loci=[91], freq=[0.682,0.318]),
sim.InitGenotype(subPops=[0] , loci=[92], freq=[0.698,0.302]),
sim.InitGenotype(subPops=[0] , loci=[93], freq=[0.506,0.494]),
sim.InitGenotype(subPops=[0] , loci=[94], freq=[0.656,0.344]),
sim.InitGenotype(subPops=[0] , loci=[95], freq=[0.474,0.526]),
sim.InitGenotype(subPops=[0] , loci=[96], freq=[0.303,0.697]),
sim.InitGenotype(subPops=[0] , loci=[97], freq=[0.451,0.549]),
sim.InitGenotype(subPops=[0] , loci=[98], freq=[0.362,0.638]),
sim.InitGenotype(subPops=[0] , loci=[99], freq=[0.527,0.473]),
sim.InitGenotype(subPops=[0] , loci=[100], freq=[0.547,0.453]),
sim.InitGenotype(subPops=[0] , loci=[101], freq=[0.508,0.492]),
sim.InitGenotype(subPops=[0] , loci=[102], freq=[0.481,0.519]),
sim.InitGenotype(subPops=[0] , loci=[103], freq=[0.455,0.545]),
sim.InitGenotype(subPops=[0] , loci=[104], freq=[0.381,0.619]),
sim.InitGenotype(subPops=[0] , loci=[105], freq=[0.524,0.476]),
sim.InitGenotype(subPops=[0] , loci=[106], freq=[0.479,0.521]),
sim.InitGenotype(subPops=[0] , loci=[107], freq=[0.463,0.537]),
sim.InitGenotype(subPops=[0] , loci=[108], freq=[0.813,0.187]),
sim.InitGenotype(subPops=[0] , loci=[109], freq=[0.52,0.48]),
sim.InitGenotype(subPops=[0] , loci=[110], freq=[0.558,0.442]),
sim.InitGenotype(subPops=[0] , loci=[111], freq=[0.559,0.441]),
sim.InitGenotype(subPops=[0] , loci=[112], freq=[0.336,0.664]),
sim.InitGenotype(subPops=[0] , loci=[113], freq=[0.628,0.372]),
sim.InitGenotype(subPops=[0] , loci=[114], freq=[0.741,0.259]),
sim.InitGenotype(subPops=[0] , loci=[115], freq=[0.931,0.069]),
sim.InitGenotype(subPops=[0] , loci=[116], freq=[0.533,0.467]),
sim.InitGenotype(subPops=[0] , loci=[117], freq=[0.6,0.4]),
sim.InitGenotype(subPops=[0] , loci=[118], freq=[0.519,0.481]),
sim.InitGenotype(subPops=[0] , loci=[119], freq=[0.537,0.463]),
sim.InitGenotype(subPops=[0] , loci=[120], freq=[0.541,0.459]),
sim.InitGenotype(subPops=[0] , loci=[121], freq=[0.495,0.505]),
sim.InitGenotype(subPops=[0] , loci=[122], freq=[0.232,0.768]),
sim.InitGenotype(subPops=[0] , loci=[123], freq=[0.69,0.31]),
sim.InitGenotype(subPops=[0] , loci=[124], freq=[0.467,0.533]),
sim.InitGenotype(subPops=[0] , loci=[125], freq=[0.504,0.496]),
sim.InitGenotype(subPops=[0] , loci=[126], freq=[0.483,0.517]),
sim.InitGenotype(subPops=[0] , loci=[127], freq=[0.465,0.535]),
sim.InitGenotype(subPops=[0] , loci=[128], freq=[0.543,0.457]),
sim.InitGenotype(subPops=[0] , loci=[129], freq=[0.769,0.231]),
sim.InitGenotype(subPops=[0] , loci=[130], freq=[0.492,0.508]),
sim.InitGenotype(subPops=[0] , loci=[131], freq=[0.479,0.521]),
sim.InitGenotype(subPops=[0] , loci=[132], freq=[0.658,0.342]),
sim.InitGenotype(subPops=[0] , loci=[133], freq=[0.559,0.441]),
sim.InitGenotype(subPops=[0] , loci=[134], freq=[0.469,0.531]),
sim.InitGenotype(subPops=[0] , loci=[135], freq=[0.41,0.59]),
sim.InitGenotype(subPops=[0] , loci=[136], freq=[0.462,0.538]),
sim.InitGenotype(subPops=[0] , loci=[137], freq=[0.503,0.497]),
sim.InitGenotype(subPops=[0] , loci=[138], freq=[0.415,0.585]),
sim.InitGenotype(subPops=[0] , loci=[139], freq=[0.687,0.313]),
sim.InitGenotype(subPops=[0] , loci=[140], freq=[0.949,0.051]),
sim.InitGenotype(subPops=[0] , loci=[141], freq=[0.558,0.442]),
sim.InitGenotype(subPops=[0] , loci=[142], freq=[0.501,0.499]),
sim.InitGenotype(subPops=[0] , loci=[143], freq=[0.553,0.447]),
sim.InitGenotype(subPops=[0] , loci=[144], freq=[0.522,0.478]),
sim.InitGenotype(subPops=[0] , loci=[145], freq=[0.57,0.43]),
sim.InitGenotype(subPops=[0] , loci=[146], freq=[0.297,0.703]),
sim.InitGenotype(subPops=[0] , loci=[147], freq=[0.549,0.451]),
sim.InitGenotype(subPops=[0] , loci=[148], freq=[0.439,0.561]),
sim.InitGenotype(subPops=[0] , loci=[149], freq=[0.614,0.386]),
sim.InitGenotype(subPops=[0] , loci=[150], freq=[0.476,0.524]),
sim.InitGenotype(subPops=[0] , loci=[151], freq=[0.286,0.714]),
sim.InitGenotype(subPops=[0] , loci=[152], freq=[0.442,0.558]),
sim.InitGenotype(subPops=[0] , loci=[153], freq=[0.694,0.306]),
sim.InitGenotype(subPops=[0] , loci=[154], freq=[0.48,0.52]),
sim.InitGenotype(subPops=[0] , loci=[155], freq=[0.574,0.426]),
sim.InitGenotype(subPops=[0] , loci=[156], freq=[0.495,0.505]),
sim.InitGenotype(subPops=[0] , loci=[157], freq=[0.536,0.464]),
sim.InitGenotype(subPops=[0] , loci=[158], freq=[0.344,0.656]),
sim.InitGenotype(subPops=[0] , loci=[159], freq=[0.476,0.524]),
sim.InitGenotype(subPops=[0] , loci=[160], freq=[0.494,0.506]),
sim.InitGenotype(subPops=[0] , loci=[161], freq=[0.759,0.241]),
sim.InitGenotype(subPops=[0] , loci=[162], freq=[0.475,0.525]),
sim.InitGenotype(subPops=[0] , loci=[163], freq=[0.678,0.322]),
sim.InitGenotype(subPops=[0] , loci=[164], freq=[0.518,0.482]),
sim.InitGenotype(subPops=[0] , loci=[165], freq=[0.632,0.368]),
sim.InitGenotype(subPops=[0] , loci=[166], freq=[0.479,0.521]),
sim.InitGenotype(subPops=[0] , loci=[167], freq=[0.353,0.647]),
sim.InitGenotype(subPops=[0] , loci=[168], freq=[0.965,0.035]),
sim.InitGenotype(subPops=[0] , loci=[169], freq=[0.483,0.517]),
sim.InitGenotype(subPops=[0] , loci=[170], freq=[0.7,0.3]),
sim.InitGenotype(subPops=[0] , loci=[171], freq=[0.519,0.481]),
sim.InitGenotype(subPops=[0] , loci=[172], freq=[0.323,0.677]),
sim.InitGenotype(subPops=[0] , loci=[173], freq=[0.463,0.537]),
sim.InitGenotype(subPops=[0] , loci=[174], freq=[0.56,0.44]),
sim.InitGenotype(subPops=[0] , loci=[175], freq=[0.391,0.609]),
sim.InitGenotype(subPops=[0] , loci=[176], freq=[0.51,0.49]),
sim.InitGenotype(subPops=[0] , loci=[177], freq=[0.466,0.534]),
sim.InitGenotype(subPops=[0] , loci=[178], freq=[0.599,0.401]),
sim.InitGenotype(subPops=[0] , loci=[179], freq=[0.528,0.472]),
sim.InitGenotype(subPops=[0] , loci=[180], freq=[0.687,0.313]),
sim.InitGenotype(subPops=[0] , loci=[181], freq=[0.341,0.659]),
sim.InitGenotype(subPops=[0] , loci=[182], freq=[0.579,0.421]),
sim.InitGenotype(subPops=[0] , loci=[183], freq=[0.474,0.526]),
sim.InitGenotype(subPops=[0] , loci=[184], freq=[0.615,0.385]),
sim.InitGenotype(subPops=[0] , loci=[185], freq=[0.818,0.182]),
sim.InitGenotype(subPops=[0] , loci=[186], freq=[0.404,0.596]),
sim.InitGenotype(subPops=[0] , loci=[187], freq=[0.567,0.433]),
sim.InitGenotype(subPops=[0] , loci=[188], freq=[0.402,0.598]),
sim.InitGenotype(subPops=[0] , loci=[189], freq=[0.489,0.511]),
sim.InitGenotype(subPops=[0] , loci=[190], freq=[0.537,0.463]),
sim.InitGenotype(subPops=[0] , loci=[191], freq=[0.514,0.486]),
sim.InitGenotype(subPops=[0] , loci=[192], freq=[0.486,0.514]),
sim.InitGenotype(subPops=[0] , loci=[193], freq=[0.565,0.435]),
sim.InitGenotype(subPops=[0] , loci=[194], freq=[0.543,0.457]),
sim.InitGenotype(subPops=[0] , loci=[195], freq=[0.633,0.367]),
sim.InitGenotype(subPops=[0] , loci=[196], freq=[0.548,0.452]),
sim.InitGenotype(subPops=[0] , loci=[197], freq=[0.458,0.542]),
sim.InitGenotype(subPops=[0] , loci=[198], freq=[0.612,0.388]),
sim.InitGenotype(subPops=[0] , loci=[199], freq=[0.572,0.428]),
sim.InitGenotype(subPops=[1] , loci=[0], freq=[0.062,0.938]),
sim.InitGenotype(subPops=[1] , loci=[1], freq=[0.989,0.011]),
sim.InitGenotype(subPops=[1] , loci=[2], freq=[0.027,0.973]),
sim.InitGenotype(subPops=[1] , loci=[3], freq=[0.028,0.972]),
sim.InitGenotype(subPops=[1] , loci=[4], freq=[0.934,0.066]),
sim.InitGenotype(subPops=[1] , loci=[5], freq=[0.966,0.034]),
sim.InitGenotype(subPops=[1] , loci=[6], freq=[0.95,0.05]),
sim.InitGenotype(subPops=[1] , loci=[7], freq=[0.031,0.969]),
sim.InitGenotype(subPops=[1] , loci=[8], freq=[0.959,0.041]),
sim.InitGenotype(subPops=[1] , loci=[9], freq=[0.988,0.012]),
sim.InitGenotype(subPops=[1] , loci=[10], freq=[0.909,0.091]),
sim.InitGenotype(subPops=[1] , loci=[11], freq=[0.868,0.132]),
sim.InitGenotype(subPops=[1] , loci=[12], freq=[0.017,0.983]),
sim.InitGenotype(subPops=[1] , loci=[13], freq=[0.944,0.056]),
sim.InitGenotype(subPops=[1] , loci=[14], freq=[0.02,0.98]),
sim.InitGenotype(subPops=[1] , loci=[15], freq=[0.975,0.025]),
sim.InitGenotype(subPops=[1] , loci=[16], freq=[0.944,0.056]),
sim.InitGenotype(subPops=[1] , loci=[17], freq=[0.988,0.012]),
sim.InitGenotype(subPops=[1] , loci=[18], freq=[0.869,0.131]),
sim.InitGenotype(subPops=[1] , loci=[19], freq=[0.017,0.983]),
sim.InitGenotype(subPops=[1] , loci=[20], freq=[0.932,0.068]),
sim.InitGenotype(subPops=[1] , loci=[21], freq=[0.079,0.921]),
sim.InitGenotype(subPops=[1] , loci=[22], freq=[0.949,0.051]),
sim.InitGenotype(subPops=[1] , loci=[23], freq=[0.024,0.976]),
sim.InitGenotype(subPops=[1] , loci=[24], freq=[0.939,0.061]),
sim.InitGenotype(subPops=[1] , loci=[25], freq=[0.957,0.043]),
sim.InitGenotype(subPops=[1] , loci=[26], freq=[0.958,0.042]),
sim.InitGenotype(subPops=[1] , loci=[27], freq=[0.949,0.051]),
sim.InitGenotype(subPops=[1] , loci=[28], freq=[0.064,0.936]),
sim.InitGenotype(subPops=[1] , loci=[29], freq=[0.023,0.977]),
sim.InitGenotype(subPops=[1] , loci=[30], freq=[0.949,0.051]),
sim.InitGenotype(subPops=[1] , loci=[31], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[32], freq=[0.021,0.979]),
sim.InitGenotype(subPops=[1] , loci=[33], freq=[0.906,0.094]),
sim.InitGenotype(subPops=[1] , loci=[34], freq=[0.96,0.04]),
sim.InitGenotype(subPops=[1] , loci=[35], freq=[0.958,0.042]),
sim.InitGenotype(subPops=[1] , loci=[36], freq=[0.056,0.944]),
sim.InitGenotype(subPops=[1] , loci=[37], freq=[0.932,0.068]),
sim.InitGenotype(subPops=[1] , loci=[38], freq=[0.898,0.102]),
sim.InitGenotype(subPops=[1] , loci=[39], freq=[0.059,0.941]),
sim.InitGenotype(subPops=[1] , loci=[40], freq=[0.09,0.91]),
sim.InitGenotype(subPops=[1] , loci=[41], freq=[0.978,0.022]),
sim.InitGenotype(subPops=[1] , loci=[42], freq=[0.879,0.121]),
sim.InitGenotype(subPops=[1] , loci=[43], freq=[0.029,0.971]),
sim.InitGenotype(subPops=[1] , loci=[44], freq=[0.95,0.05]),
sim.InitGenotype(subPops=[1] , loci=[45], freq=[0.933,0.067]),
sim.InitGenotype(subPops=[1] , loci=[46], freq=[0.034,0.966]),
sim.InitGenotype(subPops=[1] , loci=[47], freq=[0.961,0.039]),
sim.InitGenotype(subPops=[1] , loci=[48], freq=[0.956,0.044]),
sim.InitGenotype(subPops=[1] , loci=[49], freq=[0.038,0.962]),
sim.InitGenotype(subPops=[1] , loci=[50], freq=[0.931,0.069]),
sim.InitGenotype(subPops=[1] , loci=[51], freq=[0.896,0.104]),
sim.InitGenotype(subPops=[1] , loci=[52], freq=[0.503,0.497]),
sim.InitGenotype(subPops=[1] , loci=[53], freq=[0.013,0.987]),
sim.InitGenotype(subPops=[1] , loci=[54], freq=[0.914,0.086]),
sim.InitGenotype(subPops=[1] , loci=[55], freq=[0.986,0.014]),
sim.InitGenotype(subPops=[1] , loci=[56], freq=[0.017,0.983]),
sim.InitGenotype(subPops=[1] , loci=[57], freq=[0.93,0.07]),
sim.InitGenotype(subPops=[1] , loci=[58], freq=[0.018,0.982]),
sim.InitGenotype(subPops=[1] , loci=[59], freq=[0.043,0.957]),
sim.InitGenotype(subPops=[1] , loci=[60], freq=[0.058,0.942]),
sim.InitGenotype(subPops=[1] , loci=[61], freq=[0.979,0.021]),
sim.InitGenotype(subPops=[1] , loci=[62], freq=[0.064,0.936]),
sim.InitGenotype(subPops=[1] , loci=[63], freq=[0.921,0.079]),
sim.InitGenotype(subPops=[1] , loci=[64], freq=[0.154,0.846]),
sim.InitGenotype(subPops=[1] , loci=[65], freq=[0.172,0.828]),
sim.InitGenotype(subPops=[1] , loci=[66], freq=[0.982,0.018]),
sim.InitGenotype(subPops=[1] , loci=[67], freq=[0.891,0.109]),
sim.InitGenotype(subPops=[1] , loci=[68], freq=[0.902,0.098]),
sim.InitGenotype(subPops=[1] , loci=[69], freq=[0.067,0.933]),
sim.InitGenotype(subPops=[1] , loci=[70], freq=[0.923,0.077]),
sim.InitGenotype(subPops=[1] , loci=[71], freq=[0.055,0.945]),
sim.InitGenotype(subPops=[1] , loci=[72], freq=[0.029,0.971]),
sim.InitGenotype(subPops=[1] , loci=[73], freq=[0.935,0.065]),
sim.InitGenotype(subPops=[1] , loci=[74], freq=[0.97,0.03]),
sim.InitGenotype(subPops=[1] , loci=[75], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[76], freq=[0.98,0.02]),
sim.InitGenotype(subPops=[1] , loci=[77], freq=[0.933,0.067]),
sim.InitGenotype(subPops=[1] , loci=[78], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[79], freq=[0.047,0.953]),
sim.InitGenotype(subPops=[1] , loci=[80], freq=[0.982,0.018]),
sim.InitGenotype(subPops=[1] , loci=[81], freq=[0.078,0.922]),
sim.InitGenotype(subPops=[1] , loci=[82], freq=[0.99,0.01]),
sim.InitGenotype(subPops=[1] , loci=[83], freq=[0.909,0.091]),
sim.InitGenotype(subPops=[1] , loci=[84], freq=[0.934,0.066]),
sim.InitGenotype(subPops=[1] , loci=[85], freq=[0.988,0.012]),
sim.InitGenotype(subPops=[1] , loci=[86], freq=[0.973,0.027]),
sim.InitGenotype(subPops=[1] , loci=[87], freq=[0.982,0.018]),
sim.InitGenotype(subPops=[1] , loci=[88], freq=[0.076,0.924]),
sim.InitGenotype(subPops=[1] , loci=[89], freq=[0.909,0.091]),
sim.InitGenotype(subPops=[1] , loci=[90], freq=[0.924,0.076]),
sim.InitGenotype(subPops=[1] , loci=[91], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[92], freq=[0.99,0.01]),
sim.InitGenotype(subPops=[1] , loci=[93], freq=[0.054,0.946]),
sim.InitGenotype(subPops=[1] , loci=[94], freq=[0.06,0.94]),
sim.InitGenotype(subPops=[1] , loci=[95], freq=[0.027,0.973]),
sim.InitGenotype(subPops=[1] , loci=[96], freq=[0.93,0.07]),
sim.InitGenotype(subPops=[1] , loci=[97], freq=[0.933,0.067]),
sim.InitGenotype(subPops=[1] , loci=[98], freq=[0.884,0.116]),
sim.InitGenotype(subPops=[1] , loci=[99], freq=[0.922,0.078]),
sim.InitGenotype(subPops=[1] , loci=[100], freq=[0.93,0.07]),
sim.InitGenotype(subPops=[1] , loci=[101], freq=[0.082,0.918]),
sim.InitGenotype(subPops=[1] , loci=[102], freq=[0.937,0.063]),
sim.InitGenotype(subPops=[1] , loci=[103], freq=[0.945,0.055]),
sim.InitGenotype(subPops=[1] , loci=[104], freq=[0.011,0.989]),
sim.InitGenotype(subPops=[1] , loci=[105], freq=[0.108,0.892]),
sim.InitGenotype(subPops=[1] , loci=[106], freq=[0.929,0.071]),
sim.InitGenotype(subPops=[1] , loci=[107], freq=[0.071,0.929]),
sim.InitGenotype(subPops=[1] , loci=[108], freq=[0.199,0.801]),
sim.InitGenotype(subPops=[1] , loci=[109], freq=[0.086,0.914]),
sim.InitGenotype(subPops=[1] , loci=[110], freq=[0.082,0.918]),
sim.InitGenotype(subPops=[1] , loci=[111], freq=[0.979,0.021]),
sim.InitGenotype(subPops=[1] , loci=[112], freq=[0.928,0.072]),
sim.InitGenotype(subPops=[1] , loci=[113], freq=[0.089,0.911]),
sim.InitGenotype(subPops=[1] , loci=[114], freq=[0.988,0.012]),
sim.InitGenotype(subPops=[1] , loci=[115], freq=[0.201,0.799]),
sim.InitGenotype(subPops=[1] , loci=[116], freq=[0.968,0.032]),
sim.InitGenotype(subPops=[1] , loci=[117], freq=[0.986,0.014]),
sim.InitGenotype(subPops=[1] , loci=[118], freq=[0.965,0.035]),
sim.InitGenotype(subPops=[1] , loci=[119], freq=[0.015,0.985]),
sim.InitGenotype(subPops=[1] , loci=[120], freq=[0.969,0.031]),
sim.InitGenotype(subPops=[1] , loci=[121], freq=[0.067,0.933]),
sim.InitGenotype(subPops=[1] , loci=[122], freq=[0.882,0.118]),
sim.InitGenotype(subPops=[1] , loci=[123], freq=[0.99,0.01]),
sim.InitGenotype(subPops=[1] , loci=[124], freq=[0.05,0.95]),
sim.InitGenotype(subPops=[1] , loci=[125], freq=[0.072,0.928]),
sim.InitGenotype(subPops=[1] , loci=[126], freq=[0.921,0.079]),
sim.InitGenotype(subPops=[1] , loci=[127], freq=[0.019,0.981]),
sim.InitGenotype(subPops=[1] , loci=[128], freq=[0.948,0.052]),
sim.InitGenotype(subPops=[1] , loci=[129], freq=[0.179,0.821]),
sim.InitGenotype(subPops=[1] , loci=[130], freq=[0.893,0.107]),
sim.InitGenotype(subPops=[1] , loci=[131], freq=[0.936,0.064]),
sim.InitGenotype(subPops=[1] , loci=[132], freq=[0.972,0.028]),
sim.InitGenotype(subPops=[1] , loci=[133], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[134], freq=[0.921,0.079]),
sim.InitGenotype(subPops=[1] , loci=[135], freq=[0.041,0.959]),
sim.InitGenotype(subPops=[1] , loci=[136], freq=[0.065,0.935]),
sim.InitGenotype(subPops=[1] , loci=[137], freq=[0.086,0.914]),
sim.InitGenotype(subPops=[1] , loci=[138], freq=[0.968,0.032]),
sim.InitGenotype(subPops=[1] , loci=[139], freq=[0.14,0.86]),
sim.InitGenotype(subPops=[1] , loci=[140], freq=[0.542,0.458]),
sim.InitGenotype(subPops=[1] , loci=[141], freq=[0.04,0.96]),
sim.InitGenotype(subPops=[1] , loci=[142], freq=[0.936,0.064]),
sim.InitGenotype(subPops=[1] , loci=[143], freq=[0.95,0.05]),
sim.InitGenotype(subPops=[1] , loci=[144], freq=[0.114,0.886]),
sim.InitGenotype(subPops=[1] , loci=[145], freq=[0.952,0.048]),
sim.InitGenotype(subPops=[1] , loci=[146], freq=[0.017,0.983]),
sim.InitGenotype(subPops=[1] , loci=[147], freq=[0.128,0.872]),
sim.InitGenotype(subPops=[1] , loci=[148], freq=[0.011,0.989]),
sim.InitGenotype(subPops=[1] , loci=[149], freq=[0.978,0.022]),
sim.InitGenotype(subPops=[1] , loci=[150], freq=[0.08,0.92]),
sim.InitGenotype(subPops=[1] , loci=[151], freq=[0.877,0.123]),
sim.InitGenotype(subPops=[1] , loci=[152], freq=[0.935,0.065]),
sim.InitGenotype(subPops=[1] , loci=[153], freq=[0.987,0.013]),
sim.InitGenotype(subPops=[1] , loci=[154], freq=[0.966,0.034]),
sim.InitGenotype(subPops=[1] , loci=[155], freq=[0.977,0.023]),
sim.InitGenotype(subPops=[1] , loci=[156], freq=[0.106,0.894]),
sim.InitGenotype(subPops=[1] , loci=[157], freq=[0.103,0.897]),
sim.InitGenotype(subPops=[1] , loci=[158], freq=[0.896,0.104]),
sim.InitGenotype(subPops=[1] , loci=[159], freq=[0.935,0.065]),
sim.InitGenotype(subPops=[1] , loci=[160], freq=[0.895,0.105]),
sim.InitGenotype(subPops=[1] , loci=[161], freq=[0.038,0.962]),
sim.InitGenotype(subPops=[1] , loci=[162], freq=[0.913,0.087]),
sim.InitGenotype(subPops=[1] , loci=[163], freq=[0.985,0.015]),
sim.InitGenotype(subPops=[1] , loci=[164], freq=[0.926,0.074]),
sim.InitGenotype(subPops=[1] , loci=[165], freq=[0.045,0.955]),
sim.InitGenotype(subPops=[1] , loci=[166], freq=[0.98,0.02]),
sim.InitGenotype(subPops=[1] , loci=[167], freq=[0.01,0.99]),
sim.InitGenotype(subPops=[1] , loci=[168], freq=[0.393,0.607]),
sim.InitGenotype(subPops=[1] , loci=[169], freq=[0.924,0.076]),
sim.InitGenotype(subPops=[1] , loci=[170], freq=[0.12,0.88]),
sim.InitGenotype(subPops=[1] , loci=[171], freq=[0.925,0.075]),
sim.InitGenotype(subPops=[1] , loci=[172], freq=[0.011,0.989]),
sim.InitGenotype(subPops=[1] , loci=[173], freq=[0.93,0.07]),
sim.InitGenotype(subPops=[1] , loci=[174], freq=[0.105,0.895]),
sim.InitGenotype(subPops=[1] , loci=[175], freq=[0.035,0.965]),
sim.InitGenotype(subPops=[1] , loci=[176], freq=[0.955,0.045]),
sim.InitGenotype(subPops=[1] , loci=[177], freq=[0.023,0.977]),
sim.InitGenotype(subPops=[1] , loci=[178], freq=[0.988,0.012]),
sim.InitGenotype(subPops=[1] , loci=[179], freq=[0.929,0.071]),
sim.InitGenotype(subPops=[1] , loci=[180], freq=[0.986,0.014]),
sim.InitGenotype(subPops=[1] , loci=[181], freq=[0.92,0.08]),
sim.InitGenotype(subPops=[1] , loci=[182], freq=[0.98,0.02]),
sim.InitGenotype(subPops=[1] , loci=[183], freq=[0.023,0.977]),
sim.InitGenotype(subPops=[1] , loci=[184], freq=[0.108,0.892]),
sim.InitGenotype(subPops=[1] , loci=[185], freq=[0.049,0.951]),
sim.InitGenotype(subPops=[1] , loci=[186], freq=[0.011,0.989]),
sim.InitGenotype(subPops=[1] , loci=[187], freq=[0.979,0.021]),
sim.InitGenotype(subPops=[1] , loci=[188], freq=[0.025,0.975]),
sim.InitGenotype(subPops=[1] , loci=[189], freq=[0.059,0.941]),
sim.InitGenotype(subPops=[1] , loci=[190], freq=[0.956,0.044]),
sim.InitGenotype(subPops=[1] , loci=[191], freq=[0.045,0.955]),
sim.InitGenotype(subPops=[1] , loci=[192], freq=[0.053,0.947]),
sim.InitGenotype(subPops=[1] , loci=[193], freq=[0.959,0.041]),
sim.InitGenotype(subPops=[1] , loci=[194], freq=[0.957,0.043]),
sim.InitGenotype(subPops=[1] , loci=[195], freq=[0.078,0.922]),
sim.InitGenotype(subPops=[1] , loci=[196], freq=[0.062,0.938]),
sim.InitGenotype(subPops=[1] , loci=[197], freq=[0.919,0.081]),
sim.InitGenotype(subPops=[1] , loci=[198], freq=[0.94,0.06]),
sim.InitGenotype(subPops=[1] , loci=[199], freq=[0.949,0.051])
],
preOps=sim.Migrator(rate=[100], mode=sim.BY_COUNTS, subPops=[0],toSubPops=[1]),
matingScheme=sim.HomoMating(
sim.RandomParentsChooser(),
sim.OffspringGenerator(ops=[
sim.Recombinator(intensity=0.01),
sim.IdTagger(),
sim.PedigreeTagger()
]),
subPopSize=[1000,1000]
),
gen=3
)
sim.utils.export(Wild, format='PED', output='Wild_beta.ped')
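# Note (not part of the original script): the long per-locus InitGenotype list
# above could equivalently be generated from a list of allele frequencies.
# A minimal sketch, assuming a hypothetical `subpop1_freqs` sequence holding the
# frequency of the first allele at each locus for subpopulation 1:
#   init_ops = [sim.InitGenotype(subPops=[1], loci=[i], freq=[p, 1 - p])
#               for i, p in enumerate(subpop1_freqs)]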
|
|
import os, sys, logging, datetime, multiprocessing, icebridge_common, shutil
import threading, time
import flight_list # Contains AN_FLIGHTS and GR_FLIGHTS
#===============================================================================
# Constants
COMPLETED_DATES_FILE = '/u/smcmich1/icebridge/upload_software/completed_dates.txt'
FAILED_DATES_FILE = '/u/smcmich1/icebridge/upload_software/failed_dates.txt'
SOFTWARE_FOLDER = '/u/smcmich1/icebridge/upload_software/'
SIPS_LOG_FOLDER = os.path.join(SOFTWARE_FOLDER, 'logs')
INITIAL_UNPACK_FOLDER = '/nobackup/smcmich1/icebridge/nsidc_uploads/temp_tar_unpack'
ASP_PATH = '/u/smcmich1/programs/StereoPipeline-2.6.0-2018-02-06-x86_64-Linux/bin/'
EMAIL_ADDRESS = '[email protected]'
# Controls access to the two DATES files.
FLIGHT_DATES_RW_LOCK = threading.Lock()
#===============================================================================
# Classes
class Date:
def __init__(self, year, month, day):
self.year = int(year)
self.month = int(month)
self.day = int(day)
def __str__(self):
return ('%04d%02d%02d' % (self.year, self.month, self.day))
def yyyymmdd(self):
return ('%04d%02d%02d' % (self.year, self.month, self.day))
def yyyy_mm_dd(self):
return ('%04d_%02d_%02d' % (self.year, self.month, self.day))
def yyyy_mm(self):
return ('%04d_%02d' % (self.year, self.month))
class Campaign:
def __init__(self, site, year):
self.site = site
self.year = year
def getAllDates(self):
'''Return a list of all the valid dates for this campaign'''
if self.site == 'AN':
input_list = flight_list.AN_FLIGHTS
else:
input_list = flight_list.GR_FLIGHTS
flights = [f for f in input_list if f.startswith(self.year)]
dates = []
for f in flights:
year = f[0:4]
month = f[4:6]
day = f[6:8]
dates.append(Date(year,month,day))
return dates
def __str__(self):
'''Return the string representation'''
return self.site + '_' + self.year
def upload_log_and_cleanup(dem_folder, ortho_folder, dem_summary_folder, ortho_summary_folder,
unpack_prefix, has_dem_summary, has_ortho_summary,
dem_tarball, ortho_tarball, camera_tarball, summary_tarball,
remote_folder, date, logger):
'''Called by the worker thread in UploadManager'''
print 'Ready to upload folder ' + dem_folder
print 'Ready to upload folder ' + ortho_folder
#return # DEBUG!!!!
try:
# Upload the data
logger.info('Beginning data upload for flight ' + str(date))
uploadFolderToNSIDC(dem_folder, 'dem/' +remote_folder, logger)
uploadFolderToNSIDC(ortho_folder, 'ortho/'+remote_folder, logger)
if has_dem_summary:
uploadFolderToNSIDC(dem_summary_folder, 'dem/' +remote_folder, logger)
if has_ortho_summary:
uploadFolderToNSIDC(ortho_summary_folder, 'ortho/'+remote_folder, logger)
logger.info('Data upload finished for flight ' + str(date))
success = has_dem_summary and has_ortho_summary
except Exception as e:
success = False
logger.error('Caught exception for date ' + str(date) +'\n' + str(e))
if success:
# Log the input tarballs we used and whether we had all summary files.
updateLogFile(COMPLETED_DATES_FILE, date, dem_tarball, ortho_tarball,
camera_tarball, summary_tarball,
has_dem_summary, has_ortho_summary)
subject = 'COMPLETED flight date: ' + str(date)
logger.info(subject)
else:
updateLogFile(FAILED_DATES_FILE, date, dem_tarball, ortho_tarball,
camera_tarball, summary_tarball,
has_dem_summary, has_ortho_summary)
subject = 'FAILED to process flight date '+ str(date)
logger.error(subject)
sendEmail(EMAIL_ADDRESS, subject, 'NT')
# Clean up the temporary folders
#raise Exception('DEBUG')
if success:
logger.info('Ready to delete folders: ' + unpack_prefix)
cmd = 'rm -rf ' + unpack_prefix + '*'
logger.info(cmd)
os.system(cmd)
class UploadManager():
'''Class to keep uploading data in the background while the main process starts a new flight.'''
def __init__(self):
self._worker = None
def __del__(self):
self.cleanup()
def cleanup(self):
if self._worker != None:
self._worker.join()
self._worker = None
def uploadFlight(self, dem_folder, ortho_folder, dem_summary_folder, ortho_summary_folder,
unpack_prefix, has_dem_summary, has_ortho_summary,
dem_tarball, ortho_tarball, camera_tarball, summary_tarball,
remote_folder, date, logger):
'''Upload the flight in a separate thread. If another flight is still being uploaded,
blocks until that upload is finished.'''
# Block here until we are not busy with another upload.
if self._worker != None:
self._worker.join()
# Set up a working thread with the information
self._worker = threading.Thread(target=upload_log_and_cleanup,
args=(dem_folder, ortho_folder, dem_summary_folder,
ortho_summary_folder, unpack_prefix,
has_dem_summary, has_ortho_summary,
dem_tarball, ortho_tarball,
camera_tarball, summary_tarball,
remote_folder, date, logger))
# Let the worker thread run on its own
self._worker.start()
return
#===============================================================================
# Functions
def sendEmail(address, subject, body):
'''Send a simple email from the command line'''
# Remove any quotes, as that confuses the command line.
subject = subject.replace("\"", "")
body = body.replace("\"", "")
try:
cmd = 'mail -s "' + subject + '" ' + address + ' <<< "' + body + '"'
os.system(cmd)
except Exception as e:
print("Could not send mail.")
def getLatestTarFileMatchingDate(dirs, date):
'''Find the most recent tar file containing a date in the given folders'''
date_string = str(date)
candidates = []
for d in dirs:
# Get all matching tar files (possibly multiple versions)
tars = os.listdir(d)
new_candidates = [f for f in tars if ('.tar' in f) and (date_string in f)]
candidates = candidates + [os.path.join(d, f) for f in new_candidates]
# Ignore files manually marked not to use!
candidates = [c for c in candidates if 'old' not in c]
if not candidates:
raise Exception('No tarballs found for date ' + str(date))
# The last file alphabetically is the latest one
return sorted(candidates)[-1]
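# Example (file names hypothetical): given candidates
#   ['/u/smcmich1/icebridge/output/GR_20091016_dems_V1.tar',
#    '/u/smcmich1/icebridge/output/GR_20091016_dems_V2.tar'],
# sorted(candidates)[-1] returns the V2 file, so the newest version wins as long
# as its version suffix sorts last alphabetically.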
# Functions to find needed tarballs.
def findDemTarball(campaign, date):
dirs = ['/u/smcmich1/icebridge/output', '/u/smcmich1/icebridge/oleg_dems']
return getLatestTarFileMatchingDate(dirs, date)
def findOrthoTarball(campaign, date):
dirs = ['/u/smcmich1/icebridge/ortho', '/u/smcmich1/icebridge/oleg_ortho']
return getLatestTarFileMatchingDate(dirs, date)
def findCameraTarball(campaign, date):
dirs = ['/u/smcmich1/icebridge/camera', '/u/smcmich1/icebridge/oleg_cameras']
return getLatestTarFileMatchingDate(dirs, date)
def findSummaryTarball(campaign, date):
dirs = ['/u/smcmich1/icebridge/summaries', '/u/smcmich1/icebridge/oleg_summaries']
return getLatestTarFileMatchingDate(dirs, date)
def fetchTarballsFromTapes(campaign, date_list, logger):
'''Request that all of the tarballs we will need for this run be loaded from tape.'''
logger.info('Locating all the tarballs needed for ' + str(len(date_list)) + ' dates.')
# Find all the tarballs we will need
needed_files = []
for date in date_list:
try:
dem_tarball = findDemTarball (campaign, date)
ortho_tarball = findOrthoTarball (campaign, date)
camera_tarball = findCameraTarball (campaign, date)
summary_tarball = findSummaryTarball(campaign, date)
needed_files.append(dem_tarball)
needed_files.append(ortho_tarball)
needed_files.append(camera_tarball)
needed_files.append(summary_tarball)
except:
logger.error('Error finding all tarballs for date: ' + str(date))
logger.info('Requesting that these dates be loaded from tape!')
# Build a command to fetch them all at once.
cmd = 'dmget '
for f in needed_files:
cmd += f + ' '
cmd += '&' # Run this command in the background so we can start processing as soon as files are ready.
logger.info(cmd)
os.system(cmd)
def unpackTarAndGetFileList(tarPath, storage_folder, flight_title, logger, isSummary=False):
'''Extract the tif files from a tarball into a specified folder.'''
logger.info('Unpacking tar file: ' + tarPath)
if os.path.exists(storage_folder):
logger.info('Storage folder already exists, skipping unpack.')
else:
# Each flight uses a different temp unpack location
this_unpack_folder = os.path.join(INITIAL_UNPACK_FOLDER, flight_title)
os.system('mkdir -p ' + this_unpack_folder)
cmd = 'tar -xf ' + tarPath + ' --directory ' + this_unpack_folder
print cmd
logger.info(cmd)
os.system(cmd)
logger.info('Finished tar unpack command, looking for output...')
possible_directories = ['tarAssembly', 'processed', 'camera', 'summary', flight_title]
file_dir = []
top_folder = os.path.join(this_unpack_folder, flight_title)
for d in possible_directories:
test_dir = os.path.join(top_folder, d)
print(test_dir)
if os.path.exists(test_dir):
file_dir = test_dir
break
test_dir = os.path.join(this_unpack_folder, d)
print(test_dir)
if os.path.exists(test_dir):
file_dir = test_dir
break
if not file_dir:
raise Exception('ERROR: Did not find unpack folders for storage folder ' + storage_folder)
logger.info('Found data in: ' + file_dir + ', moving to ' + storage_folder)
# Move all the data files into a new directory
cmd = 'mv ' + file_dir +' '+ storage_folder
print cmd
logger.info(cmd)
os.system(cmd)
# Delete the unpack folder.
cmd = 'rm -rf ' + this_unpack_folder
print cmd
logger.info(cmd)
os.system(cmd)
logger.info('Retrieving the file list...')
# Get all the .tif files in the folder
# - Also need to handle cases where files are in a subfolder.
# In those cases we record the subfolder path and will get the file later.
all_file_list = os.listdir(storage_folder)
file_list = []
bad_file_list = []
needed_file_types = ['.tif', '.tsai', '.jpg', '.jpeg']
for f in all_file_list:
full_path = os.path.join(storage_folder, f)
ext = os.path.splitext(f)[1]
if (ext in needed_file_types) or (os.path.isdir(full_path) and len(f) > 3):
# Empty image files are a problem, but empty camera files
# are ok since we only use them for the file names.
if (os.path.getsize(full_path) == 0) and (ext != '.tsai'):
if isSummary: # We will just regenerate these later
print 'Deleting empty summary file: ' + f
os.remove(full_path)
else:
bad_file_list.append(full_path)
print('After unpack, got empty file: ' + f)
else: # A good file!
file_list.append(full_path)
num_bad_files = len(bad_file_list)
logger.info('Num bad files = ' + str(num_bad_files))
if num_bad_files > 0:
raise Exception('Quitting because of missing files after unpacking ' + tarPath)
return file_list
def add_timestamps_to_files(input_files, camera_files, postfix, browse):
'''Update the input file names to include timestamps'''
if not input_files:
return
# Associate each camera file with its frame number
camera_frames = {}
for c in camera_files:
parts = os.path.basename(c)[0:-5].split('_')
frame = int(parts[3])
camera_frames[frame] = parts
# Rename each of the DEM files to this format:
# IODEM3_20091016_17534868_02172_DEM.tif
# Ortho files start with IODIM3.
prefix = 'IODEM3_'
if postfix == 'ORTHO':
prefix = 'IODIM3_'
input_dir = os.path.dirname(input_files[0])
missing_files = False
for in_file in input_files:
fname = os.path.basename(in_file)
if prefix in fname: # Skip already converted files
continue
parts = os.path.splitext(fname)[0].split('_')
if len(parts) > 1:
frame = int(parts[1])
else: # Handle old files with just the frame number
frame = int(parts[0])
try:
cam_parts = camera_frames[frame]
except KeyError:
print('Missing camera file for input image: ' + in_file)
missing_files = True
continue
new_name = (prefix + cam_parts[1] +'_' + cam_parts[2]
+'_' + cam_parts[3] +'_'+ postfix)
if browse:
if postfix == 'ORTHO':
new_name += '.jpg'
else: # DEM
new_name += '_browse.tif'
else:
new_name += '.tif'
new_path = os.path.join(input_dir, new_name)
# Sometimes the file is inside a folder where the folder has the frame name
if os.path.isdir(in_file):
sub_files = os.listdir(in_file)
if len(sub_files) != 1:
raise Exception('Too many subfiles for folder: ' + in_file)
cmd = 'mv ' + os.path.join(in_file, sub_files[0]) +' '+ new_path
#print cmd
os.system(cmd)
cmd = 'rm -rf ' + in_file # Clean up the empty folder
#print cmd
os.system(cmd)
else: # Normal file
cmd = 'mv ' + in_file +' '+ new_path
#print cmd
os.system(cmd)
if missing_files:
raise Exception('Missing at least one camera file, check the log!')
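# Hypothetical example of the renaming above (names made up for illustration;
# only the target pattern comes from the comment at the top of this function):
# an input DEM 'F_02172.tif' whose frame (02172) matches a camera file
# 'CAM_20091016_17534868_02172.tsai' is moved to
# 'IODEM3_20091016_17534868_02172_DEM.tif'. Ortho files get the IODIM3 prefix,
# ortho browse images end in '.jpg', and DEM browse images end in '_browse.tif'.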
def makeConfigCopy(source_path, output_path, data_folder, browseFolder, isDim):
'''Make a copy of the .cfg file with the data folder inserted.'''
os.system('rm -f ' + output_path) # Always start fresh!
source_file = open(source_path, 'r')
output_file = open(output_path, 'a')
# Point to the correct config files
mcfName = 'IODEM3#001.MCF'
pushName = 'push_DEM.cfg'
if isDim:
mcfName = 'IODIM3#001.MCF'
pushName = 'push_DIM.cfg'
for line in source_file:
line = line.replace('MCF_REPLACE', mcfName)
line = line.replace('PUSH_REPLACE', pushName)
if browseFolder:
line = line.replace('BROWSE_PRESENT_REPLACE', 'Y')
else:
line = line.replace('BROWSE_PRESENT_REPLACE', 'N')
line = line.replace('BROWSE_FOLDER_REPLACE', browseFolder + '/')
if isDim: # ORTHO
line = line.replace('BROWSE_EXT_REPLACE', '.jpg')
else: # DEM
if browseFolder:
line = line.replace('BROWSE_EXT_REPLACE', '_browse.tif')
else: # Original file
line = line.replace('BROWSE_EXT_REPLACE', '.tif')
line = line.replace('REPLACE', data_folder)
output_file.write(line)
source_file.close()
output_file.close()
def createSummaryFile(inFile, outFile, isOrtho):
'''Create one summary file'''
if isOrtho:
cmd = ASP_PATH + 'gdal_translate -scale -outsize 25% 25% -of jpeg ' + inFile +' '+ outFile
print cmd
os.system(cmd)
else:
# Hillshade then downsample.
tempPath = outFile + '_temp.tif'
cmd = ASP_PATH + 'hillshade ' + inFile +' -o ' + tempPath
print cmd
os.system(cmd)
cmd = (ASP_PATH +'gdal_translate '+ tempPath +' '+ outFile+
' -of GTiff -outsize 40% 40% -b 1 -co "COMPRESS=JPEG"')
print cmd
os.system(cmd)
os.system('rm ' + tempPath)
def replaceMissingSummaryFiles(folder, summaryFolder, isOrtho, logger):
'''Recreate any missing summary files'''
inFiles = os.listdir(folder)
summaryFiles = os.listdir(summaryFolder)
# Associate each summary file with its frame number
summaryFrames = {}
for c in summaryFiles:
parts = os.path.basename(c).split('_')
frame = int(parts[3])
summaryFrames[frame] = c
# See if any input files are missing their summary frame
missingSummaryList = []
for f in inFiles:
# Skip metadata files
ext = os.path.splitext(f)[1]
if ext != '.tif':
continue
parts = os.path.basename(f).split('_')
frame = int(parts[3])
success = False
try:
summary = summaryFrames[frame]
# File exists, make sure it is not empty.
fullPath = os.path.join(summaryFolder, summary)
if (os.path.getsize(fullPath) > 0):
success = True
continue
except KeyError: # Other errors are unexpected
pass # File does not exist
if not success:
logger.info('Detected missing or empty summary file for: ' + f)
# Get the desired summary file name
if isOrtho:
summaryName = f.replace('.tif', '.jpg')
else: # DEM
summaryName = f.replace('.tif', '_browse.tif')
summaryPath = os.path.join(summaryFolder, summaryName)
inputPath = os.path.join(folder, f)
missingSummaryList.append((inputPath, summaryPath))
# If we are missing too many files fail out, otherwise regenerate them.
MAX_RECREATE_FILES = 200
numMissingFiles = len(missingSummaryList)
if numMissingFiles > MAX_RECREATE_FILES:
raise Exception('Missing too many summary files: ' + str(numMissingFiles))
else:
logger.info('Will regenerate ' + str(numMissingFiles) + ' missing summary files.')
for (inputPath, summaryPath) in missingSummaryList:
# Recreate the missing file.
createSummaryFile(inputPath, summaryPath, isOrtho)
if os.path.exists(summaryPath) and (os.path.getsize(summaryPath) > 0):
logger.info('Successfully created summary file: ' + summaryPath)
else:
logger.error('Failed to create summary file: ' + summaryPath)
raise Exception('Failed to create summary file: ' + summaryPath)
def verifyMetadataFiles(folder, extensions, logger):
'''Raise an exception if any of the required metadata files for upload are missing'''
new_files = os.listdir(folder)
counts = {}
counts['tif'] = 0
for e in extensions:
counts[e] = 0
for f in new_files: # Count up all the file extensions
(base, ext) = os.path.splitext(f)
base = os.path.join(folder, base)
if ext == '.tif':
counts['tif'] += 1
# For each TIF file, log if the other files are missing so
# we can find the bad ones.
for e in extensions:
ePath = base + '.tif.' + e
if not os.path.exists(ePath):
logger.error('Missing output file: ' + ePath)
# Accumulate the proper extension
for e in extensions:
if ext == '.'+e:
counts[e] += 1
# Make sure all the counts are the same
for e in extensions:
if counts['tif'] != counts[e]:
msg = ('Error: in folder ' + folder + ', counts = ' + str(counts))
logger.error(msg)
raise RuntimeError(msg)
def runMetGen(configPath):
'''Used to run this in a Process'''
cmd = SOFTWARE_FOLDER + 'MetGen.Start -config ' + configPath
print cmd
os.system(cmd)
def uploadFolderToNSIDC(folder, remote_folder, logger):
'''Send the data to NSIDC!'''
# Push the directory to NSIDC
remoteDirPath = os.path.join('/incoming', 'Ames', remote_folder)
logger.info("Storing at NSIDC in: " + remoteDirPath)
# Not using remote delete since we are pushing two local folders
# into a single remote folder.
#cmd = ('lftp -e "mirror -P 20 -c -R -vvv --delete --delete-first ' + folder +
# ' ' + remoteDirPath + ' -i \'\.(tif|jpg|PDR|met)$\'; bye\" -u ' + auth)
cmd = ('lftp -e "mirror -P 20 -c -R -vvv ' + folder +
' ' + remoteDirPath + ' -i \'\.(tif|jpg|PDR|met)$\'; bye\" -u ' + auth)
logger.info(cmd)
#raise Exception('DEBUG')
status = os.system(cmd)
logger.info("LFTP status: " + str(status))
if status != 0:
raise Exception("Problem pushing folder: " + folder)
def updateLogFile(log_file, date, dem_tarball, ortho_tarball, camera_tarball, summary_tarball,
dem_summary_present, ortho_summary_present):
'''Add an entry to the log file if it is not already there'''
if checkLogFile(log_file, date):
return
with FLIGHT_DATES_RW_LOCK: # Grab the lock before writing
with open(log_file, 'a') as f:
s = ('%s, %s, %s, %s, %s, %s, %s' %
(str(date), dem_tarball, ortho_tarball, camera_tarball, summary_tarball,
dem_summary_present, ortho_summary_present))
f.write(s + '\n')
def checkLogFile(log_file, date):
'''Return true if the date is in the file'''
with FLIGHT_DATES_RW_LOCK: # Grab the lock before reading
with open(log_file, 'r') as f:
for line in f:
parts = line.split()
if not parts:
continue
if str(date) in parts[0]:
return True
return False
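# A log entry written by updateLogFile has the form (tarball paths hypothetical):
#   20091016, <dem.tar>, <ortho.tar>, <camera.tar>, <summary.tar>, True, True
# and checkLogFile matches a date by looking only at the leading field.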
#========================================================
def main():
# Take the location and year as an input.
if len(sys.argv) != 4:
print 'Usage: oib_uploader.py <site> <year> <unpack_folder>'
return -1
this_campaign = Campaign(sys.argv[1], sys.argv[2])
unpack_folder = sys.argv[3]
# Set up logging
logFolder = '/u/smcmich1/icebridge/logs'
logLevel = logging.INFO
logger = icebridge_common.setUpLogger(logFolder, logLevel, "push_"+str(this_campaign))
logger.info("Logging in: " + logFolder)
# TODO: Maybe we want to keep this around in the future.
# SIPS internally records which files it has already processed and will not reprocess
# them even if the output files are missing, so delete it to avoid trouble.
logger.info('Deleting SIPS log folder.')
os.system('rm -rf ' + SIPS_LOG_FOLDER)
valid_dates = this_campaign.getAllDates()
# Filter out the dates we have already uploaded.
dates_to_upload = []
for d in valid_dates:
if ( (not checkLogFile(COMPLETED_DATES_FILE, d)) and
(not checkLogFile(FAILED_DATES_FILE, d)) ):
dates_to_upload.append(d)
for d in dates_to_upload:
print(d)
if not dates_to_upload:
print 'No dates to upload!'
#dates_to_upload = [Date(2010,05,20)] # DEBUG
# Start retrieving all of the files we will need from tape in the background.
# - This takes a long time but it is more efficient to do all at once and
# we can start processing as soon as required files are finished.
fetchTarballsFromTapes(this_campaign, dates_to_upload, logger)
UPLOAD_LIMIT = 4 # Upload this many flights before stopping.
num_uploaded = 0
# This object holds on to the uploading (as opposed to prep) thread.
uManager = UploadManager()
# Loop through all the dates that have not been uploaded yet.
for date in dates_to_upload:
logger.info('Uploading date: ' + str(date))
# Make a temporary folder
print 'Using unpack folder: ' + unpack_folder
os.system('mkdir -p ' + unpack_folder)
os.system('cd ' + unpack_folder)
dem_summary_files = [] # Init for later
ortho_summary_files = []
dem_tarball = None
ortho_tarball = None
camera_tarball = None
summary_tarball = None
success = False
try: # Make one attempt on a flight, we will have to manually check failures.
# Unpack the DEM and ORTHO tarballs into new folders.
logger.info('Unpacking tarballs...')
flight_title = this_campaign.site +'_'+ date.yyyymmdd()
folder_prefix = os.path.join(unpack_folder, flight_title)
dem_folder = folder_prefix + '_dems'
ortho_folder = folder_prefix + '_orthos'
camera_folder = folder_prefix + '_cameras'
summary_folder = folder_prefix + '_summaries' # Temporary folder until the summary files are split
dem_summary_folder = folder_prefix + '_dem_summaries'
ortho_summary_folder = folder_prefix + '_ortho_summaries'
dem_tarball = findDemTarball (this_campaign, date)
ortho_tarball = findOrthoTarball (this_campaign, date)
camera_tarball = findCameraTarball (this_campaign, date)
summary_tarball = findSummaryTarball(this_campaign, date)
logger.info('Using output folder prefix: ' + folder_prefix)
logger.info('Found DEM tarball: ' + dem_tarball )
logger.info('Found ORTHO tarball: ' + ortho_tarball )
logger.info('Found CAMERA tarball: ' + camera_tarball)
logger.info('Found SUMMARY tarball: ' + summary_tarball )
dem_files = unpackTarAndGetFileList(dem_tarball, dem_folder, flight_title, logger)
ortho_files = unpackTarAndGetFileList(ortho_tarball, ortho_folder, flight_title, logger)
summary_files = unpackTarAndGetFileList(summary_tarball, summary_folder, flight_title, logger, isSummary=True)
logger.info('Found ' + str(len(dem_files)) +' dem files and ' + str(len(ortho_files)) +' ortho files.')
# Split out the summary files into dem and ortho files and refresh the file list
logger.info('Splitting out summary files...')
os.system('mkdir '+ dem_summary_folder )
os.system('mkdir '+ ortho_summary_folder)
for f in os.listdir(summary_folder):
absPath = os.path.join(summary_folder, f)
if 'dem' in f:
new_path = os.path.join(dem_summary_folder, f)
shutil.move(absPath, new_path)
if 'ortho' in f:
new_path = os.path.join(ortho_summary_folder, f)
shutil.move(absPath, new_path)
dem_summary_files = [os.path.join(dem_summary_folder, f) for f in os.listdir(dem_summary_folder )]
ortho_summary_files = [os.path.join(ortho_summary_folder, f) for f in os.listdir(ortho_summary_folder)]
# Handle old cases where the archived files don't have timestamps
# - Does this catch all the cases?
last_dem_name = os.path.basename(dem_files[-1] )
last_ortho_name = os.path.basename(ortho_files[-1] )
logger.info('Updating main file names with timestamps...')
# Fetch the camera files, then steal the timestamps from them.
camera_files = unpackTarAndGetFileList(camera_tarball, camera_folder, flight_title, logger)
logger.info('Found ' + str(len(camera_files)) +' camera files.')
if (last_dem_name.startswith('F') or last_dem_name.startswith('batch')):
add_timestamps_to_files(dem_files, camera_files, 'DEM', False)
if (last_ortho_name.startswith('F') or last_ortho_name.startswith('batch')):
add_timestamps_to_files(ortho_files, camera_files, 'ORTHO', False)
# The summary files never had timestamps
logger.info('Updating summary file names with timestamps...')
add_timestamps_to_files(dem_summary_files, camera_files, 'DEM', True)
add_timestamps_to_files(ortho_summary_files, camera_files, 'ORTHO', True)
#raise Exception('DEBUG')
logger.info('Check for missing DEM summaries...')
replaceMissingSummaryFiles(dem_folder, dem_summary_folder, False, logger)
logger.info('Check for missing ORTHO summaries...')
replaceMissingSummaryFiles(ortho_folder, ortho_summary_folder, True, logger)
logger.info('Executing uploader prep script...')
# Execute the BatcherIODEM3PremetSpatialMaker.sh script
# - This will create some supporting files for each DEM file.
# - Try a few times in case it fails.
MAX_NUM_TRIES = 3
numTries = 0
while numTries < MAX_NUM_TRIES:
try:
numTries += 1
cmd = 'python ' + SOFTWARE_FOLDER + 'Parallel_BatcherIODEM3PremetSpatialMaker.py ' + unpack_folder +' '+ date.yyyymmdd()
logger.info(cmd)
print(cmd)
os.system(cmd)
# Check that the preliminary files are all there.
verifyMetadataFiles(dem_folder, ['spo', 'premet'], logger)
verifyMetadataFiles(ortho_folder, ['spo', 'premet'], logger)
except RuntimeError as e:
logger.error(str(e))
if numTries < MAX_NUM_TRIES:
logger.info('Trying Premet gen again...')
print 'Trying Premet gen again.....'
else:
raise Exception('Premet gen failed after ' + str(numTries) + ' attempts!')
#raise Exception('DEBUG')
print 'Executing MetGen script on DEMS.....'
logger.info('Executing MetGen script on DEMS...')
# Need to update "Primary.cfg" for every input folder.
PRIMARY_CONFIG_PATH = SOFTWARE_FOLDER + 'Primary.cfg.src'
TEMP_CONFIG_PATH_DEM = SOFTWARE_FOLDER + 'Primary_dem.cfg'
TEMP_CONFIG_PATH_ORTHO = SOFTWARE_FOLDER + 'Primary_ortho.cfg'
# Generate more metadata files
logger.info('Launching MetGen.Start commands...')
makeConfigCopy(PRIMARY_CONFIG_PATH, TEMP_CONFIG_PATH_DEM, dem_folder, dem_summary_folder, isDim=False)
makeConfigCopy(PRIMARY_CONFIG_PATH, TEMP_CONFIG_PATH_ORTHO, ortho_folder, ortho_summary_folder, isDim=True )
# This process sometimes randomly fails on a few frames, so re-run it
# in case this happens.
numTries = 0
while numTries < MAX_NUM_TRIES:
try:
numTries += 1
# This is a java tool so we run both instances in parallel.
demMetGenProcess = multiprocessing.Process(target=runMetGen, args=(TEMP_CONFIG_PATH_DEM,))
orthoMetGenProcess = multiprocessing.Process(target=runMetGen, args=(TEMP_CONFIG_PATH_ORTHO,))
demMetGenProcess.start()
orthoMetGenProcess.start()
demMetGenProcess.join()
orthoMetGenProcess.join()
logger.info('MetGen processes are finished!')
# Check that we created all of the metadata files
verifyMetadataFiles(dem_folder, ['PDR', 'met'], logger)
verifyMetadataFiles(ortho_folder, ['PDR', 'met'], logger) # Throws RuntimeError on failure.
break # Quit the loop on success.
except RuntimeError as e:
logger.error(str(e))
if numTries < MAX_NUM_TRIES:
logger.info('Trying MetGen again...')
print 'Trying MetGen again.....'
else:
raise Exception('Metgen failed after ' + str(numTries) + ' attempts!')
logger.info('Verified that all metadata files have been generated.')
#raise Exception('DEBUG')
# Start up the uploading thread for this flight,
# then start preparing the files for the next flight.
print 'Calling upload manager!'
remote_folder = str(date.year) +'/'+ this_campaign.site +'_'+ date.yyyy_mm_dd()
uManager.uploadFlight(dem_folder, ortho_folder, dem_summary_folder,
ortho_summary_folder, folder_prefix,
dem_summary_files != [], ortho_summary_files != [],
dem_tarball, ortho_tarball, camera_tarball, summary_tarball,
remote_folder, date, logger)
except Exception as e:
print 'Caught exception ' + str(e)
if str(e) != "DEBUG":
updateLogFile(FAILED_DATES_FILE, date, dem_tarball, ortho_tarball,
camera_tarball, summary_tarball,
dem_summary_files != [],
ortho_summary_files != [])
logger.error('FAILED to process flight date '+ str(date))
sendEmail(EMAIL_ADDRESS, 'Caught exception for flight: ' + str(date), str(e))
num_uploaded += 1
if num_uploaded >= UPLOAD_LIMIT:
print 'Hit the upload limit!'
print 'Waiting for the uploader to finish...'
uManager.cleanup()
return 0
if __name__=='__main__':
main()
sendEmail(EMAIL_ADDRESS, 'Finished running OIB uploader.', 'NT')
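# Example invocation (unpack path hypothetical): the script expects a site code
# ('AN' or 'GR'), a campaign year, and a scratch unpack folder, e.g.
#   python oib_uploader.py GR 2010 /nobackup/smcmich1/icebridge/unpack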
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import logging
import os
import posixpath
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import ports
from devil.utils import reraiser_thread
from pylib import constants
from pylib.base import base_test_result
from pylib.gtest import gtest_test_instance
from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
_COMMAND_LINE_FLAGS_SUPPORTED = True
_MAX_INLINE_FLAGS_LENGTH = 50 # Arbitrarily chosen.
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.NativeTest.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
'org.chromium.native_test.NativeTest.CommandLineFlags')
_EXTRA_TEST_LIST = (
'org.chromium.native_test.NativeTestInstrumentationTestRunner'
'.TestList')
_EXTRA_TEST = (
'org.chromium.native_test.NativeTestInstrumentationTestRunner'
'.Test')
_MAX_SHARD_SIZE = 256
_SECONDS_TO_NANOS = int(1e9)
# The amount of time a test executable may run before it gets killed.
_TEST_TIMEOUT_SECONDS = 30*60
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
'components_browsertests', 'content_unittests', 'content_browsertests',
'net_unittests', 'unit_tests'
]
# No-op context manager. If we used Python 3, we could change this to
# contextlib.ExitStack()
class _NullContextManager(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
# TODO(jbudorick): Move this inside _ApkDelegate once TestPackageApk is gone.
def PullAppFilesImpl(device, package, files, directory):
device_dir = device.GetApplicationDataDirectory(package)
host_dir = os.path.join(directory, str(device))
for f in files:
device_file = posixpath.join(device_dir, f)
host_file = os.path.join(host_dir, *f.split(posixpath.sep))
host_file_base, ext = os.path.splitext(host_file)
for i in itertools.count():
host_file = '%s_%d%s' % (host_file_base, i, ext)
if not os.path.exists(host_file):
break
device.PullFile(device_file, host_file)
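# For example (names illustrative), pulling 'dumps/log.txt' for device 'ABC123'
# into directory 'out' writes 'out/ABC123/dumps/log_0.txt', then _1, _2, etc. on
# later pulls, so existing host copies are never overwritten.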
def _ExtractTestsFromFilter(gtest_filter):
"""Returns the list of tests specified by the given filter.
Returns:
None if the device should be queried for the test list instead.
"""
# Empty means all tests, - means exclude filter.
if not gtest_filter or '-' in gtest_filter:
return None
patterns = gtest_filter.split(':')
# For a single pattern, allow it even if it has a wildcard so long as the
# wildcard comes at the end and there is at least one . to prove the scope is
# not too large.
# This heuristic is not necessarily faster, but normally is.
if len(patterns) == 1 and patterns[0].endswith('*'):
no_suffix = patterns[0].rstrip('*')
if '*' not in no_suffix and '.' in no_suffix:
return patterns
if '*' in gtest_filter:
return None
return patterns
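# Illustrative behaviour of the filter parsing above (test names made up):
#   'FooTest.Bar:FooTest.Baz'    -> ['FooTest.Bar', 'FooTest.Baz']
#   'FooTest.Bar*'               -> ['FooTest.Bar*']  (single scoped wildcard allowed)
#   'Foo*'                       -> None (no '.' before the wildcard, query the device)
#   'FooTest.Bar:-FooTest.Baz'   -> None (exclusion filter, query the device)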
class _ApkDelegate(object):
def __init__(self, test_instance):
self._activity = test_instance.activity
self._apk_helper = test_instance.apk_helper
self._test_apk_incremental_install_script = (
test_instance.test_apk_incremental_install_script)
self._package = test_instance.package
self._runner = test_instance.runner
self._permissions = test_instance.permissions
self._suite = test_instance.suite
self._component = '%s/%s' % (self._package, self._runner)
self._extras = test_instance.extras
def Install(self, device):
if self._test_apk_incremental_install_script:
local_device_test_run.IncrementalInstall(device, self._apk_helper,
self._test_apk_incremental_install_script)
else:
device.Install(self._apk_helper, reinstall=True,
permissions=self._permissions)
def Run(self, test, device, flags=None, **kwargs):
extras = dict(self._extras)
if ('timeout' in kwargs
and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras):
# Make sure the instrumentation doesn't kill the test before the
# scripts do. The provided timeout value is in seconds, but the
# instrumentation deals with nanoseconds because that's how Android
# handles time.
extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
kwargs['timeout'] * _SECONDS_TO_NANOS)
command_line_file = _NullContextManager()
if flags:
if len(flags) > _MAX_INLINE_FLAGS_LENGTH:
command_line_file = device_temp_file.DeviceTempFile(device.adb)
device.WriteFile(command_line_file.name, '_ %s' % flags)
extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
else:
extras[_EXTRA_COMMAND_LINE_FLAGS] = flags
test_list_file = _NullContextManager()
if test:
if len(test) > 1:
test_list_file = device_temp_file.DeviceTempFile(device.adb)
device.WriteFile(test_list_file.name, '\n'.join(test))
extras[_EXTRA_TEST_LIST] = test_list_file.name
else:
extras[_EXTRA_TEST] = test[0]
with command_line_file, test_list_file:
try:
return device.StartInstrumentation(
self._component, extras=extras, raw=False, **kwargs)
except Exception:
device.ForceStop(self._package)
raise
def PullAppFiles(self, device, files, directory):
PullAppFilesImpl(device, self._package, files, directory)
def Clear(self, device):
device.ClearApplicationState(self._package, permissions=self._permissions)
class _ExeDelegate(object):
def __init__(self, tr, dist_dir):
self._host_dist_dir = dist_dir
self._exe_file_name = os.path.basename(dist_dir)[:-len('__dist')]
self._device_dist_dir = posixpath.join(
constants.TEST_EXECUTABLE_DIR, os.path.basename(dist_dir))
self._test_run = tr
def Install(self, device):
# TODO(jbudorick): Look into merging this with normal data deps pushing if
# executables become supported on nonlocal environments.
device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)],
delete_device_stale=True)
def Run(self, test, device, flags=None, **kwargs):
tool = self._test_run.GetTool(device).GetTestWrapper()
if tool:
cmd = [tool]
else:
cmd = []
cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name))
if test:
cmd.append('--gtest_filter=%s' % ':'.join(test))
if flags:
# TODO(agrieve): This won't work if multiple flags are passed.
cmd.append(flags)
cwd = constants.TEST_EXECUTABLE_DIR
env = {
'LD_LIBRARY_PATH': self._device_dist_dir
}
try:
gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
external = device.GetExternalStoragePath()
env['GCOV_PREFIX'] = '%s/gcov' % external
env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
except (device_errors.CommandFailedError, KeyError):
pass
output = device.RunShellCommand(
cmd, cwd=cwd, env=env, check_return=False, large_output=True, **kwargs)
return output
def PullAppFiles(self, device, files, directory):
pass
def Clear(self, device):
device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True)
class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
super(LocalDeviceGtestRun, self).__init__(env, test_instance)
if self._test_instance.apk:
self._delegate = _ApkDelegate(self._test_instance)
elif self._test_instance.exe_dist_dir:
self._delegate = _ExeDelegate(self, self._test_instance.exe_dist_dir)
self._crashes = set()
self._servers = collections.defaultdict(list)
#override
def TestPackage(self):
return self._test_instance.suite
#override
def SetUp(self):
@local_device_environment.handle_shard_failures_with(
on_failure=self._env.BlacklistDevice)
def individual_device_set_up(dev):
def install_apk():
# Install test APK.
self._delegate.Install(dev)
def push_test_data():
# Push data dependencies.
device_root = posixpath.join(dev.GetExternalStoragePath(),
'chromium_tests_root')
data_deps = self._test_instance.GetDataDependencies()
host_device_tuples = [
(h, d if d is not None else device_root)
for h, d in data_deps]
dev.PushChangedFiles(host_device_tuples, delete_device_stale=True)
if not host_device_tuples:
dev.RunShellCommand(['rm', '-rf', device_root], check_return=True)
dev.RunShellCommand(['mkdir', '-p', device_root], check_return=True)
def init_tool_and_start_servers():
tool = self.GetTool(dev)
tool.CopyFiles(dev)
tool.SetupEnvironment()
self._servers[str(dev)] = []
if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
self._servers[str(dev)].append(
local_test_server_spawner.LocalTestServerSpawner(
ports.AllocateTestServerPort(), dev, tool))
for s in self._servers[str(dev)]:
s.SetUp()
steps = (install_apk, push_test_data, init_tool_and_start_servers)
if self._env.concurrent_adb:
reraiser_thread.RunAsync(steps)
else:
for step in steps:
step()
self._env.parallel_devices.pMap(individual_device_set_up)
#override
def _ShouldShard(self):
return True
#override
def _CreateShards(self, tests):
# _crashes holds tests that are known to crash; a crash prevents the tests
# that follow it in the same shard from running.
# Thus we create a separate shard for each crashed testcase,
# so that the other tests can still run.
device_count = len(self._env.devices)
shards = []
# Add shards with only one suspect testcase.
shards += [[crash] for crash in self._crashes if crash in tests]
# Delete suspect testcase from tests.
tests = [test for test in tests if not test in self._crashes]
for i in xrange(0, device_count):
unbounded_shard = tests[i::device_count]
shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
return shards
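# Sharding example: with 2 devices, no known crashes, and tests
# ['A', 'B', 'C', 'D', 'E'], the round-robin split above yields
# [['A', 'C', 'E'], ['B', 'D']]; each of those lists is further chopped into
# chunks of at most _MAX_SHARD_SIZE tests.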
#override
def _GetTests(self):
if self._test_instance.extract_test_list_from_filter:
# When the exact list of tests to run is given via command-line (e.g. when
# locally iterating on a specific test), skip querying the device (which
# takes ~3 seconds).
tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter)
if tests:
return tests
# Even when there's only one device, it still makes sense to retrieve the
# test list so that tests can be split up and run in batches rather than all
# at once (since test output is not streamed).
@local_device_environment.handle_shard_failures_with(
on_failure=self._env.BlacklistDevice)
def list_tests(dev):
raw_test_list = self._delegate.Run(
None, dev, flags='--gtest_list_tests', timeout=30)
tests = gtest_test_instance.ParseGTestListTests(raw_test_list)
if not tests:
logging.info('No tests found. Output:')
for l in raw_test_list:
logging.info(' %s', l)
tests = self._test_instance.FilterTests(tests)
return tests
# Query all devices in case one fails.
test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
# If all devices failed to list tests, raise an exception.
# Check that tl is not None and is not empty.
if all(not tl for tl in test_lists):
raise device_errors.CommandFailedError(
'Failed to list tests on any device')
return list(sorted(set().union(*[set(tl) for tl in test_lists if tl])))
#override
def _RunTest(self, device, test):
# Run the test.
timeout = (self._test_instance.shard_timeout
* self.GetTool(device).GetTimeoutScale())
output = self._delegate.Run(
test, device, flags=self._test_instance.test_arguments,
timeout=timeout, retries=0)
for s in self._servers[str(device)]:
s.Reset()
if self._test_instance.app_files:
self._delegate.PullAppFiles(device, self._test_instance.app_files,
self._test_instance.app_file_dir)
if not self._env.skip_clear_data:
self._delegate.Clear(device)
# Parse the output.
# TODO(jbudorick): Transition test scripts away from parsing stdout.
results = gtest_test_instance.ParseGTestOutput(output)
# Check whether there are any crashed testcases.
self._crashes.update(r.GetName() for r in results
if r.GetType() == base_test_result.ResultType.CRASH)
return results
#override
def TearDown(self):
@local_device_environment.handle_shard_failures
def individual_device_tear_down(dev):
for s in self._servers.get(str(dev), []):
s.TearDown()
tool = self.GetTool(dev)
tool.CleanUpEnvironment()
self._env.parallel_devices.pMap(individual_device_tear_down)
|
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dictdiffer
import networkx as nx
from solar.core.log import log
from solar.core import resource
from solar.core.resource.resource import RESOURCE_STATE
from solar.core import signals
from solar.dblayer.solar_models import CommitedResource
from solar.dblayer.solar_models import LogItem
from solar.dblayer.solar_models import StrInt
from solar.events import api as evapi
from solar.events.controls import StateChange
from solar.orchestration import graph
from solar.system_log import data
from solar import utils
from solar.system_log.consts import CHANGES
def guess_action(from_, to):
# NOTE(dshulyak) imo the way to solve this is a DSL for orchestration,
# something where this action will be explicitly specified
if not from_:
return CHANGES.run.name
elif not to:
return CHANGES.remove.name
else:
return CHANGES.update.name
def create_diff(staged, commited):
return list(dictdiffer.diff(commited, staged))
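# dictdiffer.diff yields change tuples, roughly of the form
#   ('change', 'some.key', (old_value, new_value))
#   ('add'/'remove', '', [('key', value), ...])
# so the stored 'diff' can later be replayed or reverted with dictdiffer.revert
# (used below). Key names here are illustrative.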
def create_logitem(resource, action, diffed, connections_diffed,
base_path=''):
return LogItem.new(
{'resource': resource,
'action': action,
'diff': diffed,
'connections_diff': connections_diffed,
'base_path': base_path,
'log': 'staged'})
def create_sorted_diff(staged, commited):
staged.sort()
commited.sort()
return create_diff(staged, commited)
def make_single_stage_item(resource_obj):
commited = resource_obj.load_commited()
base_path = resource_obj.base_path
if resource_obj.to_be_removed():
resource_args = {}
resource_connections = []
else:
resource_args = resource_obj.args
resource_connections = resource_obj.connections
if commited.state == RESOURCE_STATE.removed.name:
commited_args = {}
commited_connections = []
else:
commited_args = commited.inputs
commited_connections = commited.connections
inputs_diff = create_diff(resource_args, commited_args)
connections_diff = create_sorted_diff(
resource_connections, commited_connections)
# if new connection created it will be reflected in inputs
# but using inputs to reverse connections is not possible
if inputs_diff:
li = create_logitem(
resource_obj.name,
guess_action(commited_args, resource_args),
inputs_diff,
connections_diff,
base_path=base_path)
li.save()
return li
return None
def stage_changes():
for li in data.SL():
li.delete()
last = LogItem.history_last()
since = StrInt.greater(last.updated) if last else None
staged_log = utils.solar_map(make_single_stage_item,
resource.load_updated(since), concurrency=10)
staged_log = filter(None, staged_log)
return staged_log
def send_to_orchestration():
dg = nx.MultiDiGraph()
events = {}
changed_nodes = []
for logitem in data.SL():
events[logitem.resource] = evapi.all_events(logitem.resource)
changed_nodes.append(logitem.resource)
state_change = StateChange(logitem.resource, logitem.action)
state_change.insert(changed_nodes, dg)
evapi.build_edges(dg, events)
# what `name` should be?
dg.graph['name'] = 'system_log'
return graph.create_plan_from_graph(dg)
def parameters(res, action, data):
return {'args': [res, action],
'type': 'solar_resource'}
def _get_args_to_update(args, connections):
"""Returns args to update
For each resource we can update only args that are not provided
by connections
"""
inherited = [i[3].split(':')[0] for i in connections]
return {
key: args[key] for key in args
if key not in inherited
}
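# Sketch of the filtering above (names hypothetical): with
#   args = {'ip': '10.0.0.2', 'port': 8080}
# and a connection ('node1', 'ip', 'db', 'ip') feeding the 'ip' input,
# only {'port': 8080} is returned, because inputs provided by connections
# must not be overwritten directly.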
def revert_uids(uids):
"""Reverts uids
:param uids: iterable not generator
"""
items = LogItem.multi_get(uids)
for item in items:
if item.action == CHANGES.update.name:
_revert_update(item)
elif item.action == CHANGES.remove.name:
_revert_remove(item)
elif item.action == CHANGES.run.name:
_revert_run(item)
else:
log.debug('Action %s for resource %s is a side'
' effect of another action', item.action, item.resource)
def _revert_remove(logitem):
"""Resource should be created with all previous connections"""
commited = CommitedResource.get(logitem.resource)
args = dictdiffer.revert(logitem.diff, commited.inputs)
connections = dictdiffer.revert(
logitem.connections_diff, sorted(commited.connections))
resource.Resource(logitem.resource, logitem.base_path,
args=_get_args_to_update(args, connections),
tags=commited.tags)
for emitter, emitter_input, receiver, receiver_input in connections:
emmiter_obj = resource.load(emitter)
receiver_obj = resource.load(receiver)
signals.connect(emmiter_obj, receiver_obj, {
emitter_input: receiver_input})
def _update_inputs_connections(res_obj, args, old_connections, new_connections): # NOQA
removed = []
for item in old_connections:
if item not in new_connections:
removed.append(item)
added = []
for item in new_connections:
if item not in old_connections:
added.append(item)
for emitter, _, receiver, _ in removed:
emmiter_obj = resource.load(emitter)
receiver_obj = resource.load(receiver)
emmiter_obj.disconnect(receiver_obj)
for emitter, emitter_input, receiver, receiver_input in added:
emmiter_obj = resource.load(emitter)
receiver_obj = resource.load(receiver)
emmiter_obj.connect(receiver_obj, {emitter_input: receiver_input})
if removed or added:
# TODO: without this save we get an error that some values
# cannot be updated, even though the connection was removed
receiver_obj.db_obj.save()
res_obj.update(args)
def _revert_update(logitem):
"""Revert of update should update inputs and connections"""
res_obj = resource.load(logitem.resource)
commited = res_obj.load_commited()
connections = dictdiffer.revert(
logitem.connections_diff, sorted(commited.connections))
args = dictdiffer.revert(logitem.diff, commited.inputs)
_update_inputs_connections(
res_obj, _get_args_to_update(args, connections),
commited.connections, connections)
def _revert_run(logitem):
res_obj = resource.load(logitem.resource)
res_obj.remove()
def revert(uid):
return revert_uids([uid])
def _discard_remove(item):
resource_obj = resource.load(item.resource)
resource_obj.set_created()
def _discard_update(item):
resource_obj = resource.load(item.resource)
old_connections = resource_obj.connections
new_connections = dictdiffer.revert(
item.connections_diff, sorted(old_connections))
args = dictdiffer.revert(item.diff, resource_obj.args)
_update_inputs_connections(
resource_obj, _get_args_to_update(args, new_connections),
old_connections, new_connections)
def _discard_run(item):
resource.load(item.resource).remove(force=True)
def discard_uids(uids):
items = LogItem.multi_get(uids)
for item in items:
if item.action == CHANGES.update.name:
_discard_update(item)
elif item.action == CHANGES.remove.name:
_discard_remove(item)
elif item.action == CHANGES.run.name:
_discard_run(item)
else:
log.debug('Action %s for resource %s is a side'
' effect of another action', item.action, item.resource)
item.delete()
def discard_uid(uid):
return discard_uids([uid])
def discard_all():
staged_log = data.SL()
return discard_uids([l.uid for l in staged_log])
def commit_all():
"""Helper mainly for ease of testing"""
from solar.system_log.operations import move_to_commited
for item in data.SL():
move_to_commited(item.log_action)
def clear_history():
LogItem.delete_all()
CommitedResource.delete_all()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer, ChunkedStream
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.getEncryptionEnabled(self._jsc)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
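    # Example (a minimal sketch, not part of the original docstring): getOrCreate()
    # returns the active context if one exists, otherwise it builds one from the
    # given (or a default) SparkConf. The app name below is arbitrary.
    #   conf = SparkConf().setAppName("example-app")
    #   sc = SparkContext.getOrCreate(conf)
    #   assert sc is SparkContext.getOrCreate()  # repeat calls return the same singleton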
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
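    # Example (sketch): silence INFO chatter for the current context:
    #   sc.setLogLevel("WARN")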
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
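    # Example (sketch; the "2g" value is illustrative only): set executor memory
    # before any SparkContext is created.
    #   SparkContext.setSystemProperty("spark.executor.memory", "2g")
    #   sc = SparkContext("local", "example")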
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
        Distribute a local Python collection to form an RDD. For better
        performance, using xrange is recommended if the input represents a range.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
:param createRDDServer: A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
:return:
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
                # we eagerly read the file so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
        If use_unicode is False, the strings will be kept as `str` (encoded
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
        If use_unicode is False, the strings will be kept as `str` (encoded
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred, large file is also allowable, but
may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
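    # Example (sketch; the path and record length are placeholders): read fixed
    # 8-byte records and decode them on the Python side, e.g. with struct.unpack.
    #   raw = sc.binaryRecords("hdfs://namenode/data/records.bin", recordLength=8)
    #   ints = raw.map(lambda b: struct.unpack("<q", b)[0])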
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
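    # Example (sketch; the path and Writable class names are placeholders): keys
    # and values are converted to Python objects via Pyrolite where possible.
    #   rdd = sc.sequenceFile("hdfs://namenode/data/events.seq",
    #                         keyClass="org.apache.hadoop.io.Text",
    #                         valueClass="org.apache.hadoop.io.LongWritable")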
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
        object for reading it in distributed functions. The variable will
        be sent to each node only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
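    # Example (sketch): ship a small lookup table to the executors once and read
    # it inside tasks via .value:
    #   lookup = sc.broadcast({"a": 1, "b": 2})
    #   sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()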
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
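    # Example (sketch): aggregate values from tasks into a driver-side counter;
    # the value is only reliably visible on the driver after the action finishes.
    #   acc = sc.accumulator(0)
    #   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
    #   # acc.value == 10 on the driver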
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
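    # Example (sketch; the archive path is a placeholder): ship helper code to the
    # executors so it can be imported inside functions that run on workers.
    #   sc.addPyFile("/tmp/helpers.zip")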
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
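    # Example (sketch; the pool name is illustrative): route jobs submitted from
    # this thread to a named fair-scheduler pool.
    #   sc.setLocalProperty("spark.scheduler.pool", "production")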
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
import numpy as np
import tables as tb
class Particle(tb.IsDescription):
ADCcount = tb.Int16Col() # signed short integer
TDCcount = tb.UInt8Col() # unsigned byte
grid_i = tb.Int32Col() # integer
grid_j = tb.Int32Col() # integer
idnumber = tb.Int64Col() # signed long long
name = tb.StringCol(16, dflt="") # 16-character String
pressure = tb.Float32Col(shape=2) # float (single-precision)
temperature = tb.Float64Col() # double (double-precision)
Particle2 = {
# You can also use any of the atom factories, i.e. the one which
# accepts a PyTables type.
"ADCcount": tb.Col.from_type("int16"), # signed short integer
"TDCcount": tb.Col.from_type("uint8"), # unsigned byte
"grid_i": tb.Col.from_type("int32"), # integer
"grid_j": tb.Col.from_type("int32"), # integer
"idnumber": tb.Col.from_type("int64"), # signed long long
"name": tb.Col.from_kind("string", 16), # 16-character String
"pressure": tb.Col.from_type("float32", (2,)), # float
# (single-precision)
"temperature": tb.Col.from_type("float64"), # double
# (double-precision)
}
# The name of our HDF5 file
filename = "table-tree.h5"
# Open a file in "w"rite mode
h5file = tb.open_file(filename, mode="w")
# Create a new group under "/" (root)
group = h5file.create_group("/", 'detector')
# Create one table on it
# table = h5file.create_table(group, 'table', Particle, "Title example")
# You can choose creating a Table from a description dictionary if you wish
table = h5file.create_table(group, 'table', Particle2, "Title example")
# Create a shortcut to the table record object
particle = table.row
# Fill the table with 10 particles
for i in range(10):
# First, assign the values to the Particle record
particle['name'] = 'Particle: %6d' % (i)
particle['TDCcount'] = i % 256
particle['ADCcount'] = (i * 256) % (1 << 16)
particle['grid_i'] = i
particle['grid_j'] = 10 - i
particle['pressure'] = [float(i * i), float(i * 2)]
particle['temperature'] = float(i ** 2)
    particle['idnumber'] = i * (2 ** 34)  # This exceeds the 32-bit integer range
# This injects the Record values.
particle.append()
# Flush the buffers for table
table.flush()
# Get actual data from table. We are interested in column pressure.
pressure = [p['pressure'] for p in table.iterrows()]
print("Last record ==>", pressure)
print("Column pressure ==>", np.array(pressure))
print("Total records in table ==> ", len(pressure))
print()
# Create a new group to hold new arrays
gcolumns = h5file.create_group("/", "columns")
print("columns ==>", gcolumns, pressure)
# Create an array with this info under '/columns' having a 'list' flavor
h5file.create_array(gcolumns, 'pressure', pressure,
"Pressure column")
print("gcolumns.pressure type ==> ", gcolumns.pressure.atom.dtype)
# Do the same with TDCcount, but with a numpy object
TDC = [p['TDCcount'] for p in table.iterrows()]
print("TDC ==>", TDC)
print("TDC shape ==>", np.array(TDC).shape)
h5file.create_array('/columns', 'TDC', np.array(TDC), "TDCcount column")
# Do the same with name column
names = [p['name'] for p in table.iterrows()]
print("names ==>", names)
h5file.create_array('/columns', 'name', names, "Name column")
# This works even with homogeneous tuples or lists (!)
print("gcolumns.name shape ==>", gcolumns.name.shape)
print("gcolumns.name type ==> ", gcolumns.name.atom.dtype)
print("Table dump:")
for p in table.iterrows():
print(p)
# Save a recarray object under detector
r = np.rec.array("a" * 300, formats='f4,3i4,a5,i2', shape=3)
recarrt = h5file.create_table("/detector", 'recarray', r, "RecArray example")
r2 = r[0:3:2]
# Create a second table from a non-contiguous slice of the recarray
recarrt = h5file.create_table("/detector", 'recarray2', r2,
"Non-contiguous recarray")
print(recarrt)
print()
print(h5file.root.detector.table.description)
# Close the file
h5file.close()
# sys.exit()
# Reopen it in append mode
h5file = tb.open_file(filename, "a")
# Ok. let's start browsing the tree from this filename
print("Reading info from filename:", h5file.filename)
print()
# Firstly, list all the groups on tree
print("Groups in file:")
for group in h5file.walk_groups("/"):
print(group)
print()
# List all the nodes (Group and Leaf objects) on tree
print("List of all nodes in file:")
print(h5file)
# And finally, only the Arrays (Array objects)
print("Arrays in file:")
for array in h5file.walk_nodes("/", classname="Array"):
print(array)
print()
# Get group /detector and print some info on it
detector = h5file.get_node("/detector")
print("detector object ==>", detector)
# List only leaves on detector
print("Leaves in group", detector, ":")
for leaf in h5file.list_nodes("/detector", 'Leaf'):
print(leaf)
print()
# List only tables on detector
print("Tables in group", detector, ":")
for leaf in h5file.list_nodes("/detector", 'Table'):
print(leaf)
print()
# List only arrays on detector (there should be none!)
print("Arrays in group", detector, ":")
for leaf in h5file.list_nodes("/detector", 'Array'):
print(leaf)
print()
# Get "/detector" Group object
group = h5file.root.detector
print("/detector ==>", group)
# Get the "/detector/table
table = h5file.get_node("/detector/table")
print("/detector/table ==>", table)
# Get metadata from table
print("Object:", table)
print("Table name:", table.name)
print("Table title:", table.title)
print("Rows saved on table: %d" % (table.nrows))
print("Variable names on table with their type:")
for name in table.colnames:
print(" ", name, ':=', table.coldtypes[name])
print()
# Read arrays in /columns/names and /columns/pressure
# Get the object in "/columns pressure"
pressureObject = h5file.get_node("/columns", "pressure")
# Get some metadata on this object
print("Info on the object:", pressureObject)
print(" shape ==>", pressureObject.shape)
print(" title ==>", pressureObject.title)
print(" type ==> ", pressureObject.atom.dtype)
print(" byteorder ==> ", pressureObject.byteorder)
# Read the pressure actual data
pressureArray = pressureObject.read()
print(" data type ==>", type(pressureArray))
print(" data ==>", pressureArray)
print()
# Get the object in "/columns/names"
nameObject = h5file.root.columns.name
# Get some metadata on this object
print("Info on the object:", nameObject)
print(" shape ==>", nameObject.shape)
print(" title ==>", nameObject.title)
print(" type ==> " % nameObject.atom.dtype)
# Read the 'name' actual data
nameArray = nameObject.read()
print(" data type ==>", type(nameArray))
print(" data ==>", nameArray)
# Print the data for both arrays
print("Data on arrays name and pressure:")
for i in range(pressureObject.shape[0]):
print("".join(nameArray[i]), "-->", pressureArray[i])
print()
# Finally, append some new records to table
table = h5file.root.detector.table
# Append 5 new particles to table (yes, tables can be enlarged!)
particle = table.row
for i in range(10, 15):
# First, assign the values to the Particle record
particle['name'] = 'Particle: %6d' % (i)
particle['TDCcount'] = i % 256
particle['ADCcount'] = (i * 256) % (1 << 16)
particle['grid_i'] = i
particle['grid_j'] = 10 - i
particle['pressure'] = [float(i * i), float(i * 2)]
particle['temperature'] = float(i ** 2)
    particle['idnumber'] = i * (2 ** 34)  # This exceeds the 32-bit integer range
# This injects the Row values.
particle.append()
# Flush this table
table.flush()
print("Columns name and pressure on expanded table:")
# Print some table columns, for comparison with array data
for p in table:
print(p['name'], '-->', p['pressure'])
print()
# Put several flavors
oldflavor = table.flavor
print(table.read(field="ADCcount"))
table.flavor = "numpy"
print(table.read(field="ADCcount"))
table.flavor = oldflavor
print(table.read(0, 0, 1, "name"))
table.flavor = "python"
print(table.read(0, 0, 1, "name"))
table.flavor = oldflavor
print(table.read(0, 0, 2, "pressure"))
table.flavor = "python"
print(table.read(0, 0, 2, "pressure"))
table.flavor = oldflavor
# Several range selections
print("Extended slice in selection: [0:7:6]")
print(table.read(0, 7, 6))
print("Single record in selection: [1]")
print(table.read(1))
print("Last record in selection: [-1]")
print(table.read(-1))
print("Two records before the last in selection: [-3:-1]")
print(table.read(-3, -1))
# Print a recarray in table form
table = h5file.root.detector.recarray2
print("recarray2:", table)
print(" nrows:", table.nrows)
print(" byteorder:", table.byteorder)
print(" coldtypes:", table.coldtypes)
print(" colnames:", table.colnames)
print(table.read())
for p in table.iterrows():
print(p['f1'], '-->', p['f2'])
print()
result = [rec['f1'] for rec in table if rec.nrow < 2]
print(result)
# Test the File.rename_node() method
# h5file.rename_node(h5file.root.detector.recarray2, "recarray3")
h5file.rename_node(table, "recarray3")
# Delete a Leaf from the HDF5 tree
h5file.remove_node(h5file.root.detector.recarray3)
# Delete the detector group and its leaves recursively
# h5file.remove_node(h5file.root.detector, recursive=1)
# Create a Group and then remove it
h5file.create_group(h5file.root, "newgroup")
h5file.remove_node(h5file.root, "newgroup")
h5file.rename_node(h5file.root.columns, "newcolumns")
print(h5file)
# Close this file
h5file.close()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add time zone awareness
Revision ID: 0e2a74e0fc9f
Revises: d2ae31099d61
Create Date: 2017-11-10 22:22:31.326152
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "0e2a74e0fc9f"
down_revision = "d2ae31099d61"
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
conn = op.get_bind()
if conn.dialect.name == "mysql":
conn.execute("SET time_zone = '+00:00'")
cur = conn.execute("SELECT @@explicit_defaults_for_timestamp")
res = cur.fetchall()
if res[0][0] == 0:
raise Exception("Global variable explicit_defaults_for_timestamp needs to be on (1) for mysql")
op.alter_column(
table_name="chart",
column_name="last_modified",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(
table_name="dag",
column_name="last_scheduler_run",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(table_name="dag", column_name="last_expired", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="dag_pickle",
column_name="created_dttm",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(
table_name="dag_run",
column_name="execution_date",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="dag_run", column_name="start_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(table_name="dag_run", column_name="end_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="import_error",
column_name="timestamp",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="job", column_name="start_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(table_name="job", column_name="end_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="job",
column_name="latest_heartbeat",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="log", column_name="dttm", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(table_name="log", column_name="execution_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="sla_miss",
column_name="execution_date",
type_=mysql.TIMESTAMP(fsp=6),
nullable=False,
)
op.alter_column(table_name="sla_miss", column_name="timestamp", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="task_fail",
column_name="execution_date",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(
table_name="task_fail",
column_name="start_date",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="task_fail", column_name="end_date", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="task_instance",
column_name="execution_date",
type_=mysql.TIMESTAMP(fsp=6),
nullable=False,
)
op.alter_column(
table_name="task_instance",
column_name="start_date",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(
table_name="task_instance",
column_name="end_date",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(
table_name="task_instance",
column_name="queued_dttm",
type_=mysql.TIMESTAMP(fsp=6),
)
op.alter_column(table_name="xcom", column_name="timestamp", type_=mysql.TIMESTAMP(fsp=6))
op.alter_column(
table_name="xcom",
column_name="execution_date",
type_=mysql.TIMESTAMP(fsp=6),
)
else:
# sqlite and mssql datetime are fine as is. Therefore, not converting
if conn.dialect.name in ("sqlite", "mssql"):
return
# we try to be database agnostic, but not every db (e.g. sqlserver)
# supports per session time zones
if conn.dialect.name == "postgresql":
conn.execute("set timezone=UTC")
op.alter_column(
table_name="chart",
column_name="last_modified",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag",
column_name="last_scheduler_run",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag",
column_name="last_pickled",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag",
column_name="last_expired",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag_pickle",
column_name="created_dttm",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag_run",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag_run",
column_name="start_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="dag_run",
column_name="end_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="import_error",
column_name="timestamp",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="job",
column_name="start_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(table_name="job", column_name="end_date", type_=sa.TIMESTAMP(timezone=True))
op.alter_column(
table_name="job",
column_name="latest_heartbeat",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(table_name="log", column_name="dttm", type_=sa.TIMESTAMP(timezone=True))
op.alter_column(
table_name="log",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="sla_miss",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
nullable=False,
)
op.alter_column(
table_name="sla_miss",
column_name="timestamp",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_fail",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_fail",
column_name="start_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_fail",
column_name="end_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_instance",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
nullable=False,
)
op.alter_column(
table_name="task_instance",
column_name="start_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_instance",
column_name="end_date",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="task_instance",
column_name="queued_dttm",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="xcom",
column_name="timestamp",
type_=sa.TIMESTAMP(timezone=True),
)
op.alter_column(
table_name="xcom",
column_name="execution_date",
type_=sa.TIMESTAMP(timezone=True),
)
def downgrade(): # noqa: D103
conn = op.get_bind()
if conn.dialect.name == "mysql":
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name="chart", column_name="last_modified", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="dag",
column_name="last_scheduler_run",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mysql.DATETIME(fsp=6))
op.alter_column(table_name="dag", column_name="last_expired", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="dag_pickle",
column_name="created_dttm",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(
table_name="dag_run",
column_name="execution_date",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="dag_run", column_name="start_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(table_name="dag_run", column_name="end_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="import_error",
column_name="timestamp",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="job", column_name="start_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(table_name="job", column_name="end_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="job",
column_name="latest_heartbeat",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="log", column_name="dttm", type_=mysql.DATETIME(fsp=6))
op.alter_column(table_name="log", column_name="execution_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="sla_miss",
column_name="execution_date",
type_=mysql.DATETIME(fsp=6),
nullable=False,
)
op.alter_column(table_name="sla_miss", column_name="timestamp", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="task_fail",
column_name="execution_date",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(
table_name="task_fail",
column_name="start_date",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="task_fail", column_name="end_date", type_=mysql.DATETIME(fsp=6))
op.alter_column(
table_name="task_instance",
column_name="execution_date",
type_=mysql.DATETIME(fsp=6),
nullable=False,
)
op.alter_column(
table_name="task_instance",
column_name="start_date",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(
table_name="task_instance",
column_name="end_date",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(
table_name="task_instance",
column_name="queued_dttm",
type_=mysql.DATETIME(fsp=6),
)
op.alter_column(table_name="xcom", column_name="timestamp", type_=mysql.DATETIME(fsp=6))
op.alter_column(table_name="xcom", column_name="execution_date", type_=mysql.DATETIME(fsp=6))
else:
if conn.dialect.name in ("sqlite", "mssql"):
return
# we try to be database agnostic, but not every db (e.g. sqlserver)
# supports per session time zones
if conn.dialect.name == "postgresql":
conn.execute("set timezone=UTC")
op.alter_column(table_name="chart", column_name="last_modified", type_=sa.DateTime())
op.alter_column(table_name="dag", column_name="last_scheduler_run", type_=sa.DateTime())
op.alter_column(table_name="dag", column_name="last_pickled", type_=sa.DateTime())
op.alter_column(table_name="dag", column_name="last_expired", type_=sa.DateTime())
op.alter_column(table_name="dag_pickle", column_name="created_dttm", type_=sa.DateTime())
op.alter_column(table_name="dag_run", column_name="execution_date", type_=sa.DateTime())
op.alter_column(table_name="dag_run", column_name="start_date", type_=sa.DateTime())
op.alter_column(table_name="dag_run", column_name="end_date", type_=sa.DateTime())
op.alter_column(table_name="import_error", column_name="timestamp", type_=sa.DateTime())
op.alter_column(table_name="job", column_name="start_date", type_=sa.DateTime())
op.alter_column(table_name="job", column_name="end_date", type_=sa.DateTime())
op.alter_column(table_name="job", column_name="latest_heartbeat", type_=sa.DateTime())
op.alter_column(table_name="log", column_name="dttm", type_=sa.DateTime())
op.alter_column(table_name="log", column_name="execution_date", type_=sa.DateTime())
op.alter_column(
table_name="sla_miss",
column_name="execution_date",
type_=sa.DateTime(),
nullable=False,
)
op.alter_column(table_name="sla_miss", column_name="timestamp", type_=sa.DateTime())
op.alter_column(table_name="task_fail", column_name="execution_date", type_=sa.DateTime())
op.alter_column(table_name="task_fail", column_name="start_date", type_=sa.DateTime())
op.alter_column(table_name="task_fail", column_name="end_date", type_=sa.DateTime())
op.alter_column(
table_name="task_instance",
column_name="execution_date",
type_=sa.DateTime(),
nullable=False,
)
op.alter_column(table_name="task_instance", column_name="start_date", type_=sa.DateTime())
op.alter_column(table_name="task_instance", column_name="end_date", type_=sa.DateTime())
op.alter_column(table_name="task_instance", column_name="queued_dttm", type_=sa.DateTime())
op.alter_column(table_name="xcom", column_name="timestamp", type_=sa.DateTime())
op.alter_column(table_name="xcom", column_name="execution_date", type_=sa.DateTime())
|
|
# Copyright 2006 James Tauber and contributors
# Copyright (C) 2009, 2010 Luke Kenneth Casson Leighton <[email protected]>
# Copyright (C) 2010 Serge Tarkovski <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Window
from pyjamas import Factory
from __pyjamas__ import JS, doc
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui import MouseListener
from pyjamas.ui import KeyboardListener
class PopupPanel(SimplePanel):
_props = [("modal", "Modal", "Modal", None),
]
def __init__(self, autoHide=False, modal=True, rootpanel=None, glass=False,
**kwargs):
self.popupListeners = []
self.showing = False
self.autoHide = autoHide
kwargs['Modal'] = kwargs.get('Modal', modal)
if rootpanel is None:
rootpanel = RootPanel()
self.rootpanel = rootpanel
self.glass = glass
if self.glass:
self.glass = DOM.createDiv()
            if 'GlassStyleName' not in kwargs:
kwargs['GlassStyleName'] = "gwt-PopupPanelGlass"
        if 'Element' in kwargs:
element = kwargs.pop('Element')
else:
element = self.createElement()
DOM.setStyleAttribute(element, "position", "absolute")
SimplePanel.__init__(self, element, **kwargs)
if glass:
self.setGlassEnabled(True)
if 'GlassStyleName' in kwargs:
self.setGlassStyleName(kwargs.pop('GlassStyleName'))
@classmethod
def _getProps(self):
return SimplePanel._getProps() + self._props
def addPopupListener(self, listener):
self.popupListeners.append(listener)
def getPopupLeft(self):
return DOM.getIntAttribute(self.getElement(), "offsetLeft")
def getPopupTop(self):
return DOM.getIntAttribute(self.getElement(), "offsetTop")
# PopupImpl.createElement
def createElement(self):
return DOM.createDiv()
def hide(self, autoClosed=False):
if not self.showing:
return
self.showing = False
if self.glass:
self.hideGlass()
DOM.removeEventPreview(self)
self.rootpanel.remove(self)
self.onHideImpl(self.getElement())
for listener in self.popupListeners:
if hasattr(listener, 'onPopupClosed'):
listener.onPopupClosed(self, autoClosed)
else:
listener(self, autoClosed)
def setModal(self, modal):
self.modal = modal
def getModal(self):
return self.isModal()
def isModal(self):
""" deprecated - please use getModal
"""
return self.modal
def _event_targets_popup(self, event):
target = DOM.eventGetTarget(event)
return target and DOM.isOrHasChild(self.getElement(), target)
def onEventPreview(self, event):
etype = DOM.eventGetType(event)
if etype == "keydown":
return ( self.onKeyDownPreview(
DOM.eventGetKeyCode(event),
KeyboardListener.getKeyboardModifiers(event)
)
and (not self.modal or self._event_targets_popup(event))
)
elif etype == "keyup":
return ( self.onKeyUpPreview(
DOM.eventGetKeyCode(event),
KeyboardListener.getKeyboardModifiers(event)
)
and (not self.modal or self._event_targets_popup(event))
)
elif etype == "keypress":
return ( self.onKeyPressPreview(
DOM.eventGetKeyCode(event),
KeyboardListener.getKeyboardModifiers(event)
)
and (not self.modal or self._event_targets_popup(event))
)
elif ( etype == "mousedown"
or etype == "blur"
):
if DOM.getCaptureElement() is not None:
return True
if self.autoHide and not self._event_targets_popup(event):
self.hide(True)
return True
elif ( etype == "mouseup"
or etype == "click"
or etype == "mousemove"
or type == "dblclick"
):
if DOM.getCaptureElement() is not None:
return True
return not self.modal or self._event_targets_popup(event)
def onKeyDownPreview(self, key, modifiers):
return True
def onKeyPressPreview(self, key, modifiers):
return True
def onKeyUpPreview(self, key, modifiers):
return True
# PopupImpl.onHide
def onHideImpl(self, popup):
pass
# PopupImpl.onShow
def onShowImpl(self, popup):
pass
def removePopupListener(self, listener):
self.popupListeners.remove(listener)
def setPopupPosition(self, left, top):
if isinstance(left, basestring):
if left.endswith('%'):
left = int(left[:-1])
left = int(left * Window.getClientWidth() / 100)
elif left.lower().endswith('px'):
left = int(left[:-2])
if isinstance(top, basestring):
if top.lower().endswith('%'):
top = int(top[:-1])
top = int(top * Window.getClientHeight() / 100)
elif top.endswith('px'):
top = int(top[:-2])
left = max(left, 0)
top = max(top, 0)
# Account for the difference between absolute position and the
# body's positioning context.
left -= DOM.getBodyOffsetLeft()
top -= DOM.getBodyOffsetTop()
element = self.getElement()
DOM.setStyleAttribute(element, "left", "%dpx" % left)
DOM.setStyleAttribute(element, "top", "%dpx" % top)
def isGlassEnabled(self):
return self.glass is not None
def setGlassEnabled(self, enabled):
if enabled:
if self.glass is None:
self.glass = DOM.createDiv()
self.setGlassStyleName()
elif self.glass is not None:
self.hideGlass()
def getGlassElement(self):
return self.glass
def setGlassStyleName(self, style="gwt-PopupPanelGlass"):
if self.glass:
DOM.setAttribute(self.glass, "className", style)
def getGlassStyleName(self):
if self.glass:
            return DOM.getAttribute(self.glass, "className")
def setGlassPosition(self):
top = Window.getScrollTop()
left = Window.getScrollLeft()
height = Window.getClientHeight()
width = Window.getClientWidth()
DOM.setStyleAttribute(self.glass, "position", "absolute")
DOM.setStyleAttribute(self.glass, "left", "%s" % \
left if left == 0 else "%spx" % left)
DOM.setStyleAttribute(self.glass, "top", "%s" % \
top if top == 0 else "%spx" % top)
DOM.setStyleAttribute(self.glass, "height", "%spx" % (top + height))
DOM.setStyleAttribute(self.glass, "width", "%spx" % (left + width))
def showGlass(self):
Window.enableScrolling(False)
self.setGlassPosition()
doc().body.appendChild(self.glass)
Window.addWindowResizeListener(self)
def hideGlass(self):
Window.removeWindowResizeListener(self)
doc().body.removeChild(self.glass)
Window.enableScrolling(True)
def onWindowResized(self, width, height):
self.setGlassPosition()
def centerBox(self):
self_width = self.getOffsetWidth()
self_height = self.getOffsetHeight()
height = Window.getClientHeight()
width = Window.getClientWidth()
center_x = int(width) / 2
center_y = int(height) / 2
self_top = center_y - (int(self_height) / 2)
self_left = center_x - (int(self_width) / 2)
self.setPopupPosition(self_left, self_top)
def center(self):
self.centerBox()
self.show()
def add(self, widget):
self.setWidget(widget)
def show(self):
if self.showing:
return
self.showing = True
if self.glass:
self.showGlass()
DOM.addEventPreview(self)
self.rootpanel.add(self)
self.onShowImpl(self.getElement())
Factory.registerClass('pyjamas.ui.PopupPanel', 'PopupPanel', PopupPanel)
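# Hedged usage sketch (not part of the original module): how PopupPanel is typically
# driven from application code. PopupPanel only runs inside a compiled pyjamas
# application, and the HTML widget and the constructor keywords shown here are
# assumptions, so this is illustrative rather than runnable on its own:
#
#     popup = PopupPanel(autoHide=True, modal=True)
#     popup.add(HTML("Session expired"))
#     popup.setGlassEnabled(True)           # dims the page behind the popup
#     popup.setPopupPosition("50%", "30%")  # percentages resolve against the client area
#     popup.show()                          # or popup.center() to center and show
#     ...
#     popup.hide()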
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for processing X.509 v3 certificates."""
import contextlib
import datetime
import ipaddress
import logging
import re
import socket
import ssl
import urllib.parse
import cryptography
import cryptography.hazmat
import cryptography.hazmat.backends
import cryptography.hazmat.primitives
import cryptography.hazmat.primitives.asymmetric
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.serialization
import cryptography.x509
import cryptography.x509.oid
import pyasn1.codec.der
import pyasn1.codec.der.decoder
import d1_common.const
# Map OIDs to short names for use when creating DataONE compliant serializations of the
# DN.
#
# This is pulled from the LDAPv3 RFCs (RFC 4510 to RFC 4519).
#
# The set of OIDs that can occur in RDNs seems to be poorly defined. RFC 4514 refers to
# a registry but, if the registry exists, it's probably too large to be useful to us. So
# we pull in OIDs for a small set that can be expected in RDNs in certs from CILogon and
# will expand it if required.
#
# RFC 4514 section 2: Converting DistinguishedName from ASN.1 to a String
#
# If the AttributeType is defined to have a short name (descriptor) [RFC4512] and that
# short name is known to be registered [REGISTRY] [RFC4520] as identifying the
# AttributeType, that short name, a <descr>, is used. Otherwise the AttributeType is
# encoded as the dotted-decimal encoding, a <numericoid>, of its OBJECT IDENTIFIER.
# The <descr> and <numericoid> are defined in [RFC4512].
OID_TO_SHORT_NAME_DICT = {
"0.9.2342.19200300.100.1.1": "UID", # userId
"0.9.2342.19200300.100.1.25": "DC", # domainComponent
"1.2.840.113549.1.9.1": "email", # emailAddress
"2.5.4.3": "CN", # commonName
"2.5.4.4": "SN", # surname
"2.5.4.6": "C", # countryName
"2.5.4.7": "L", # localityName
"2.5.4.8": "ST", # stateOrProvinceName
"2.5.4.9": "STREET", # streetAddress
"2.5.4.10": "O", # organizationName
"2.5.4.11": "OU", # organizationalUnitName
}
DATAONE_SUBJECT_INFO_OID = "1.3.6.1.4.1.34998.2.1"
AUTHORITY_INFO_ACCESS_OID = "1.3.6.1.5.5.7.1.1" # authorityInfoAccess
CA_ISSUERS_OID = "1.3.6.1.5.5.7.48.2" # caIssuers
OCSP_OID = "1.3.6.1.5.5.7.48.1" # OCSP
UBUNTU_CA_BUNDLE_PATH = "/etc/ssl/certs/ca-certificates.crt"
# Subjects
def extract_subjects(cert_pem):
"""Extract primary subject and SubjectInfo from a DataONE PEM (Base64) encoded X.509
v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- Primary subject (str) extracted from the certificate DN.
- SubjectInfo (XML str) if present (see the subject_info module for parsing)
"""
cert_obj = deserialize_pem(cert_pem)
return extract_subject_from_dn(cert_obj), extract_subject_info_extension(cert_obj)
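# Hedged example (not part of the original module): typical use of extract_subjects(),
# reading a PEM encoded certificate from disk. The path is illustrative only.
def _example_extract_subjects(cert_pem_path="./client_cert.pem"):
    """Return the primary subject and SubjectInfo XML (or None) for a PEM cert file."""
    with open(cert_pem_path, "rb") as f:
        primary_str, subject_info_xml = extract_subjects(f.read())
    logging.info("Primary subject: %s", primary_str)
    return primary_str, subject_info_xml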
def extract_subject_from_dn(cert_obj):
"""Serialize a DN to a DataONE subject string.
Args:
cert_obj: cryptography.Certificate
Returns:
str:
Primary subject extracted from the certificate DN.
The certificate DN (DistinguishedName) is a sequence of RDNs
(RelativeDistinguishedName). Each RDN is a set of AVAs (AttributeValueAssertion /
AttributeTypeAndValue). A DataONE subject is a plain string. As there is no single
standard specifying how to create a string representation of a DN, DataONE selected
one of the most common ways, which yield strings such as:
CN=Some Name A123,O=Some Organization,C=US,DC=Some Domain,DC=org
In particular, the sequence of RDNs is reversed. Attribute values are escaped,
attribute type and value pairs are separated by "=", and AVAs are joined together
with ",". If an RDN contains an unknown OID, the OID is serialized as a dotted
string.
As all the information in the DN is preserved, it is not possible to create the
same subject with two different DNs, and the DN can be recreated from the subject.
"""
return ",".join(
"{}={}".format(
OID_TO_SHORT_NAME_DICT.get(v.oid.dotted_string, v.oid.dotted_string),
rdn_escape(v.value),
)
for v in reversed(list(cert_obj.subject))
)
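# Hedged example (not part of the original module): the subject string produced for a
# simple self-signed certificate, built with helpers defined elsewhere in this module.
def _example_subject_serialization():
    """Build a throwaway self-signed cert and show its DataONE subject string."""
    dn = create_simple_dn("urn:node:mnTestEXAMPLE")
    cert_obj = generate_ca_cert(dn, generate_private_key())
    # The RDN sequence is reversed, so this returns:
    # "CN=urn:node:mnTestEXAMPLE,DC=dataone,DC=local"
    return extract_subject_from_dn(cert_obj)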
def create_mn_dn(node_urn):
"""Create a certificate DN suitable for use in Member Node client side certificates
issued by DataONE, and thus in Certificate Signing Requests (CSR). The DN will be of
the form:
.. highlight:: none
::
DC=org, DC=dataone, CN=urn:node:<ID>
where <ID> typically is a short acronym for the name of the organization responsible
for the Member Node.
The DN is formatted into a DataONE subject, which is used in authentication,
authorization and event tracking.
Args:
node_urn (str): Node URN. E.g.:
- Production certificate: ``urn:node:XYZ``.
- Test certificate ``urn:node:mnTestXYZ``.
Returns:
cryptography.x509.Name
"""
return create_simple_dn(node_urn, domain_component_list=["org", "dataone"])
def create_simple_dn(common_name_str, domain_component_list=None):
"""Create a simple certificate DN suitable for use in testing and for generating
self-signed CA and other certificates.
::
DC=local, DC=dataone, CN=<common name>
Args:
common_name_str: The Common Name to use for the certificate.
DataONE uses simple DNs without physical location information, so only the
``common_name_str`` (``CommonName``) needs to be specified.
For Member Node Client Side certificates or CSRs, ``common_name_str`` is the
``node_id``, e.g., ``urn:node:ABCD`` for production, or
``urn:node:mnTestABCD`` for the test environments.
For a local CA, something like ``localCA`` may be used.
For a locally trusted client side certificate, something like
``localClient`` may be used.
domain_component_list: list
Optionally set custom domain components. Defaults to ``["local", "dataone"]``.
Returns:
cryptography.x509.Name
"""
domain_component_list = domain_component_list or ["local", "dataone"]
attr_list = []
for dc_str in domain_component_list:
attr_list.append(
cryptography.x509.NameAttribute(
cryptography.x509.oid.NameOID.DOMAIN_COMPONENT, dc_str
)
)
attr_list.append(
cryptography.x509.NameAttribute(
cryptography.x509.oid.NameOID.COMMON_NAME, common_name_str
)
)
return cryptography.x509.Name(attr_list)
# CSR
def generate_csr(private_key_bytes, dn, fqdn_list=None):
"""Generate a Certificate Signing Request (CSR).
Args:
private_key_bytes: bytes
Private key with which the CSR will be signed.
dn: cryptography.x509.Name
The dn can be built by passing a list of cryptography.x509.NameAttribute to
cryptography.x509.Name.
Simple DNs can be created with the ``create_dn*`` functions in this module.
fqdn_list: list of str
List of Fully Qualified Domain Names (FQDN) and/or IP addresses for which
this certificate will provide authentication.
E.g.: ['my.membernode.org', '1.2.3.4']
This is mainly useful for creating a self signed server side certificate or
a CSR that will be submitted to a trusted CA, such as Verisign, for signing.
Returns:
cryptography.x509.CertificateSigningRequest
"""
csr = cryptography.x509.CertificateSigningRequestBuilder(subject_name=dn)
if fqdn_list:
csr = csr.add_extension(
extension=cryptography.x509.SubjectAlternativeName(
[cryptography.x509.DNSName(v) for v in fqdn_list]
),
critical=False,
)
return csr.sign(
private_key=private_key_bytes,
algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
backend=cryptography.hazmat.backends.default_backend(),
)
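# Hedged example (not part of the original module): generating a key pair and CSR for a
# hypothetical test Member Node. The node URN, FQDN and output paths are illustrative.
def _example_generate_mn_csr(node_urn="urn:node:mnTestEXAMPLE"):
    """Generate a private key and a CSR signed with it, then save both as PEM."""
    private_key = generate_private_key(key_size=2048)
    csr = generate_csr(private_key, create_mn_dn(node_urn), fqdn_list=["mn.example.org"])
    save_pem("example_key.pem", serialize_private_key_to_pem(private_key))
    save_pem("example_csr.pem", serialize_cert_to_pem(csr))
    return csr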
# PEM
def deserialize_pem(cert_pem):
"""Deserialize PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
cert_obj: cryptography.Certificate
"""
if isinstance(cert_pem, str):
cert_pem = cert_pem.encode("utf-8")
return cryptography.x509.load_pem_x509_certificate(
data=cert_pem, backend=cryptography.hazmat.backends.default_backend()
)
def deserialize_pem_file(cert_path):
"""Deserialize PEM (Base64) encoded X.509 v3 certificate in file.
Args:
cert_path: str or bytes
Path to PEM (Base64) encoded X.509 v3 certificate file
Returns:
cert_obj: cryptography.Certificate
"""
with open(cert_path, "rb") as f:
return deserialize_pem(f.read())
def serialize_cert_to_pem(cert_obj):
"""Serialize certificate to PEM.
The certificate can be also be a Certificate Signing Request (CSR).
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded certificate
"""
return cert_obj.public_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM
)
# DataONE SubjectInfo Extension
def extract_subject_info_extension(cert_obj):
"""Extract DataONE SubjectInfo XML doc from certificate.
Certificates issued by DataONE may include an embedded XML doc containing
additional information about the subject specified in the certificate DN. If
present, the doc is stored as an extension with an OID specified by DataONE and
formatted as specified in the DataONE SubjectInfo schema definition.
Args:
cert_obj: cryptography.Certificate
Returns:
str : SubjectInfo XML doc if present, else None
"""
try:
subject_info_der = cert_obj.extensions.get_extension_for_oid(
cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)
).value.value
return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0])
except Exception as e:
logging.debug('SubjectInfo not extracted. reason="{}"'.format(e))
# Download Certificate
def download_as_der(
base_url=d1_common.const.URL_DATAONE_ROOT,
timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
"""Download public certificate from a TLS/SSL web server as DER encoded ``bytes``.
If the certificate is being downloaded in order to troubleshoot validation issues,
the download itself may fail due to the validation issue that is being investigated.
To work around such chicken-and-egg problems, temporarily wrap calls to the
download_* functions with the ``disable_cert_validation()`` context manager (also in
this module).
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
bytes: The server's public certificate as DER encoded bytes.
"""
# TODO: It is unclear which SSL and TLS protocols are supported by the method
# currently being used. The current method and the two commented out below
# should be compared to determine which has the best compatibility with current
# versions of Python and current best practices for protocol selection.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout_sec)
ssl_socket = ssl.wrap_socket(sock)
url_obj = urllib.parse.urlparse(base_url)
# urlparse() leaves a bare hostname (no scheme) in .path rather than .netloc.
ssl_socket.connect((url_obj.netloc or url_obj.path, 443))
return ssl_socket.getpeercert(binary_form=True)
# (1)
# ssl_context = ssl.create_default_context()
# ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
# ssl_socket = ssl_context.wrap_socket(
# socket.socket(),
# server_hostname=django.conf.settings.DATAONE_ROOT
# )
#
# (2)
# ssl_protocol_list: PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23,
# PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
#
# Check if protocols should be checked in order of preference or if the
# function will do that.
#
# for ssl_protocol_str in ssl_protocol_list:
# print ssl_protocol_str
# try:
# return ssl.get_server_certificate(
# addr=(hostname_str, int(port_str)),
# ssl_version=getattr(ssl, ssl_protocol_str),
# ca_certs=UBUNTU_CA_BUNDLE_PATH,
# )
# except ssl.SSLError as e:
# logging.info('SSL: {}'.format(str(e)))
# if ssl_protocol_str is ssl_protocol_list[-1]:
# raise
# Download
def download_as_pem(
base_url=d1_common.const.URL_DATAONE_ROOT,
timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
"""Download public certificate from a TLS/SSL web server as PEM encoded string.
Also see download_as_der().
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
str: The certificate as a PEM encoded string.
"""
return ssl.DER_cert_to_PEM_cert(download_as_der(base_url, timeout_sec))
def download_as_obj(
base_url=d1_common.const.URL_DATAONE_ROOT,
timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
"""Download public certificate from a TLS/SSL web server as Certificate object.
Also see download_as_der().
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
cryptography.Certificate
"""
return decode_der(download_as_der(base_url, timeout_sec))
def decode_der(cert_der):
"""Decode cert DER string to Certificate object.
Args:
cert_der : Certificate as a DER encoded string
Returns:
cryptography.Certificate()
"""
return cryptography.x509.load_der_x509_certificate(
data=cert_der, backend=cryptography.hazmat.backends.default_backend()
)
# noinspection PyProtectedMember
@contextlib.contextmanager
def disable_cert_validation():
"""Context manager to temporarily disable certificate validation in the standard SSL
library.
Note: This should not be used in production code but is sometimes useful for
troubleshooting certificate validation issues.
By design, the standard SSL library does not provide a way to disable verification
of the server side certificate. However, a patch to disable validation is described
by the library developers. This context manager allows applying the patch for
specific sections of code.
"""
current_context = ssl._create_default_https_context
ssl._create_default_https_context = ssl._create_unverified_context
try:
yield
finally:
ssl._create_default_https_context = current_context
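# Hedged example (not part of the original module): wrapping a download in
# disable_cert_validation(), as suggested in the docstring above, when troubleshooting a
# server whose certificate does not validate locally. The URL is illustrative only.
def _example_download_without_validation(base_url="https://unvalidated.example.org/mn"):
    """Download and log the server certificate even if it fails local validation."""
    with disable_cert_validation():
        cert_pem = download_as_pem(base_url)
    logging.info("Downloaded certificate:\n%s", cert_pem)
    return cert_pem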
def extract_issuer_ca_cert_url(cert_obj):
"""Extract issuer CA certificate URL from certificate.
Certificates may include a URL where the root certificate for the CA which was used
for signing the certificate can be downloaded. This function returns the URL if
present.
The primary use for this is to fix validation failure due to non-trusted issuer by
downloading the root CA certificate from the URL and installing it in the local
trust store.
Args:
cert_obj: cryptography.Certificate
Returns:
str: Issuer certificate URL if present, else None
"""
for extension in cert_obj.extensions:
if extension.oid.dotted_string == AUTHORITY_INFO_ACCESS_OID:
authority_info_access = extension.value
for access_description in authority_info_access:
if access_description.access_method.dotted_string == CA_ISSUERS_OID:
return access_description.access_location.value
# Private key
def serialize_private_key_to_pem(private_key, passphrase_bytes=None):
"""Serialize private key to PEM.
Args:
private_key: RSAPrivateKey or other private key object from the ``cryptography`` package.
passphrase_bytes: bytes or None
If provided, the key is encrypted with the passphrase using the best available
encryption. If None, the key is serialized unencrypted.
Returns:
bytes: PEM encoded private key
"""
return private_key.private_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
format=cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=cryptography.hazmat.primitives.serialization.BestAvailableEncryption(
passphrase_bytes
)
if passphrase_bytes is not None
else cryptography.hazmat.primitives.serialization.NoEncryption(),
)
def generate_private_key(key_size=2048):
"""Generate a private key."""
return cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=cryptography.hazmat.backends.default_backend(),
)
# Public Key
def get_public_key_pem(cert_obj):
"""Extract public key from certificate as PEM encoded PKCS#1.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded PKCS#1 public key.
"""
return cert_obj.public_key().public_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
format=cryptography.hazmat.primitives.serialization.PublicFormat.PKCS1,
)
# File
def save_pem(pem_path, pem_bytes):
"""Save PEM encoded bytes to file."""
with open(pem_path, "wb") as f:
f.write(pem_bytes)
def load_csr(pem_path):
"""Load CSR from PEM encoded file."""
with open(pem_path, "rb") as f:
return cryptography.x509.load_pem_x509_csr(
data=f.read(), backend=cryptography.hazmat.backends.default_backend()
)
def load_private_key(pem_path, passphrase_bytes=None):
"""Load private key from PEM encoded file."""
with open(pem_path, "rb") as f:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data=f.read(),
password=passphrase_bytes,
backend=cryptography.hazmat.backends.default_backend(),
)
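# Hedged example (not part of the original module): round-tripping a private key through
# an optionally encrypted PEM file. The path is illustrative only.
def _example_private_key_roundtrip(pem_path="example_key.pem"):
    """Generate, save (optionally encrypted), and reload a private key."""
    passphrase_bytes = input_key_passphrase("example private key")
    private_key = generate_private_key()
    save_pem(pem_path, serialize_private_key_to_pem(private_key, passphrase_bytes))
    return load_private_key(pem_path, passphrase_bytes)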
# Client Side Certificate
def generate_cert(ca_issuer, ca_key, csr_subject, csr_pub_key):
# Issue a certificate for the CSR's subject and public key, signed by the CA's
# private key and carrying the CA's DN as issuer.
return (
cryptography.x509.CertificateBuilder()
.subject_name(csr_subject)
.issuer_name(ca_issuer)
.public_key(csr_pub_key)
.serial_number(cryptography.x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(
# Our certificate will be valid for 10 days
datetime.datetime.utcnow()
+ datetime.timedelta(days=10)
)
.add_extension(
extension=cryptography.x509.SubjectAlternativeName(
[cryptography.x509.DNSName("localhost")]
),
critical=False,
)
.sign(
private_key=ca_key,
algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
backend=cryptography.hazmat.backends.default_backend(),
)
)
def serialize_cert_to_der(cert_obj):
"""Serialize certificate to DER.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: DER encoded certificate
"""
return cert_obj.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER
)
# CA
def generate_ca_cert(
dn, private_key, fqdn_str=None, public_ip=None, private_ip=None, valid_days=10 * 365
):
"""Args:
dn: cryptography.x509.Name
A cryptography.x509.Name holding a sequence of cryptography.x509.NameAttribute objects.
See the create_dn* functions.
private_key: RSAPrivateKey, etc
fqdn_str:
public_ip:
private_ip:
valid_days: int
Number of days from now until the certificate expires.
Returns:
"""
# best practice seem to be to include the hostname in the SAN, which *SHOULD*
# mean COMMON_NAME is ignored.
# allow addressing by IP, for when you don't have real DNS (common in most
# testing scenarios)
# openssl wants DNSnames for ips
# cryptography.x509.DNSName(private_ip),
alt_name_list = [cryptography.x509.DNSName("localhost")]
if fqdn_str:
alt_name_list.append(cryptography.x509.DNSName(fqdn_str))
if public_ip:
alt_name_list.append(cryptography.x509.DNSName(public_ip))
alt_name_list.append(
cryptography.x509.IPAddress(ipaddress.IPv4Address(public_ip))
)
if private_ip:
alt_name_list.append(cryptography.x509.DNSName(private_ip))
alt_name_list.append(
cryptography.x509.IPAddress(ipaddress.IPv4Address(private_ip))
)
alt_names = cryptography.x509.SubjectAlternativeName(alt_name_list)
# path_length=0: this CA may issue end-entity certificates but not subordinate CAs.
basic_constraints = cryptography.x509.BasicConstraints(ca=True, path_length=0)
now = datetime.datetime.utcnow()
return (
cryptography.x509.CertificateBuilder()
.subject_name(dn)
.issuer_name(dn)
.public_key(private_key.public_key())
.serial_number(cryptography.x509.random_serial_number())
.not_valid_before(now)
.not_valid_after(now + datetime.timedelta(days=valid_days))
.add_extension(basic_constraints, critical=False)
.add_extension(alt_names, critical=False)
.sign(
private_key=private_key,
algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
backend=cryptography.hazmat.backends.default_backend(),
)
)
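# Hedged example (not part of the original module): a complete local trust chain using
# only helpers from this module: create a local CA, then issue a client side certificate
# for a CSR. All names and output paths are illustrative only.
def _example_create_local_ca_and_client_cert():
    """Create a self-signed local CA and a CA-signed client certificate."""
    ca_key = generate_private_key()
    ca_cert = generate_ca_cert(create_simple_dn("localCA"), ca_key)
    client_key = generate_private_key()
    csr = generate_csr(client_key, create_simple_dn("localClient"))
    client_cert = generate_cert(
        ca_issuer=ca_cert.subject,
        ca_key=ca_key,
        csr_subject=csr.subject,
        csr_pub_key=csr.public_key(),
    )
    save_pem("local_ca.pem", serialize_cert_to_pem(ca_cert))
    save_pem("local_client.pem", serialize_cert_to_pem(client_cert))
    return ca_cert, client_cert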
# Misc
def input_key_passphrase(applicable_str="private key"):
passphrase_str = input(
"Passphrase for {} (Press Enter for no passphrase): ".format(applicable_str)
)
if passphrase_str == "":
return None
return passphrase_str.encode("utf-8")
def check_cert_type(cert):
public_key = cert.public_key()
if isinstance(
public_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey
):
print("IS RSA")
elif isinstance(
public_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey
):
print("IS EllipticCurvePublicKey")
else:
print("UNKNOWN")
def rdn_escape(rdn_str):
"""Escape string for use as an RDN (RelativeDistinguishedName)
The following chars must be escaped in RDNs: , = + < > # ; \ "
Args:
rdn_str : str
Returns:
str: Escaped string ready for use in an RDN.
"""
return re.sub(r"([,=+<>#;\\])", r"\\\1", rdn_str)
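# Hedged example (not part of the original module): rdn_escape() puts a backslash in
# front of each RDN special character, so
#
#     rdn_escape("Acme, Inc. + Co")
#
# returns the string ``Acme\, Inc. \+ Co``.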
# noinspection PyProtectedMember
def log_cert_info(logger, msg_str, cert_obj):
"""Dump basic certificate values to the log.
Args:
logger: callable
Logging function (e.g., ``logging.info``) to which to write the certificate
values.
msg_str: str
A message to write to the log before the certificate values.
cert_obj: cryptography.Certificate
Certificate containing values to log.
Returns:
None
"""
list(
map(
logger,
["{}:".format(msg_str)]
+ [" {}: {}".format(k, v) for k, v in get_cert_info_list(cert_obj)],
)
)
# noinspection PyProtectedMember
def get_cert_info_list(cert_obj):
"""Get a list of certificate values.
Args:
cert_obj: cryptography.Certificate
Certificate containing values to retrieve.
Returns:
list of tup: Certificate value name, value
"""
return [
("Subject", _get_val_str(cert_obj, ["subject", "value"], reverse=True)),
("Issuer", _get_val_str(cert_obj, ["issuer", "value"], reverse=True)),
("Not Valid Before", cert_obj.not_valid_before.isoformat()),
("Not Valid After", cert_obj.not_valid_after.isoformat()),
(
"Subject Alt Names",
_get_ext_val_str(cert_obj, "SUBJECT_ALTERNATIVE_NAME", ["value", "value"]),
),
(
"CRL Distribution Points",
_get_ext_val_str(
cert_obj,
"CRL_DISTRIBUTION_POINTS",
["value", "full_name", "value", "value"],
),
),
(
"Authority Access Location",
extract_issuer_ca_cert_url(cert_obj) or "<not found>",
),
]
def get_extension_by_name(cert_obj, extension_name):
"""Get a standard certificate extension by attribute name.
Args:
cert_obj: cryptography.Certificate
Certificate containing a standard extension.
extension_name : str
Extension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.
Returns:
Cryptography.Extension
"""
try:
return cert_obj.extensions.get_extension_for_oid(
getattr(cryptography.x509.oid.ExtensionOID, extension_name)
)
except cryptography.x509.ExtensionNotFound:
pass
# Private
def _get_val_list(obj, path_list, reverse=False):
"""Extract values from nested objects by attribute names.
Objects contain attributes which are named references to objects. This will descend
down a tree of nested objects, starting at the given object, following the given
path.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
list of objects
"""
try:
y = getattr(obj, path_list[0])
except AttributeError:
return []
if len(path_list) == 1:
return [y]
else:
val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]
if reverse:
val_list.reverse()
return val_list
def _get_val_str(obj, path_list=None, reverse=False):
"""Extract values from nested objects by attribute names and concatenate their
string representations.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
str: Concatenated extracted values.
"""
val_list = _get_val_list(obj, path_list or [], reverse)
return "<not found>" if obj is None else " / ".join(map(str, val_list))
def _get_ext_val_str(cert_obj, extension_name, path_list=None):
"""Get value from certificate extension.
Args:
cert_obj: cryptography.Certificate
Certificate containing a standard extension.
extension_name : str
Extension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.
path_list: list
Attribute names
Returns:
str : String value of extension
"""
return _get_val_str(
get_extension_by_name(cert_obj, extension_name), path_list or []
)
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for volume types code."""
import datetime
import time
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import test
from cinder.tests.unit import conf_fixture
from cinder.volume import qos_specs
from cinder.volume import volume_types
class VolumeTypeTestCase(test.TestCase):
"""Test cases for volume type code."""
def setUp(self):
super(VolumeTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.vol_type1_name = str(int(time.time()))
self.vol_type1_specs = dict(type="physical drive",
drive_type="SAS",
size="300",
rpm="7200",
visible="True")
self.vol_type1_description = self.vol_type1_name + '_desc'
def test_volume_type_create_then_destroy(self):
"""Ensure volume types can be created and deleted."""
prev_all_vtypes = volume_types.get_all_types(self.ctxt)
# create
type_ref = volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs,
description=self.vol_type1_description)
new = volume_types.get_volume_type_by_name(self.ctxt,
self.vol_type1_name)
self.assertEqual(self.vol_type1_description, new['description'])
for k, v in self.vol_type1_specs.items():
self.assertEqual(v, new['extra_specs'][k],
'one of fields does not match')
new_all_vtypes = volume_types.get_all_types(self.ctxt)
self.assertEqual(len(prev_all_vtypes) + 1,
len(new_all_vtypes),
'drive type was not created')
# update
new_type_name = self.vol_type1_name + '_updated'
new_type_desc = self.vol_type1_description + '_updated'
type_ref_updated = volume_types.update(self.ctxt,
type_ref.id,
new_type_name,
new_type_desc)
self.assertEqual(new_type_name, type_ref_updated['name'])
self.assertEqual(new_type_desc, type_ref_updated['description'])
# destroy
volume_types.destroy(self.ctxt, type_ref['id'])
new_all_vtypes = volume_types.get_all_types(self.ctxt)
self.assertEqual(prev_all_vtypes,
new_all_vtypes,
'drive type was not deleted')
def test_create_volume_type_with_invalid_params(self):
"""Ensure exception will be returned."""
vol_type_invalid_specs = "invalid_extra_specs"
self.assertRaises(exception.VolumeTypeCreateFailed,
volume_types.create, self.ctxt,
self.vol_type1_name,
vol_type_invalid_specs)
def test_get_all_volume_types(self):
"""Ensures that all volume types can be retrieved."""
session = db_api.get_session()
total_volume_types = session.query(models.VolumeTypes).count()
vol_types = volume_types.get_all_types(self.ctxt)
self.assertEqual(total_volume_types, len(vol_types))
def test_get_default_volume_type(self):
"""Ensures default volume type can be retrieved."""
volume_types.create(self.ctxt, conf_fixture.def_vol_type, {})
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual(conf_fixture.def_vol_type,
default_vol_type.get('name'))
def test_default_volume_type_missing_in_db(self):
"""Test default volume type is missing in database.
Ensures proper exception raised if default volume type
is not in database.
"""
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual({}, default_vol_type)
def test_get_default_volume_type_under_non_default(self):
cfg.CONF.set_default('default_volume_type', None)
self.assertEqual({}, volume_types.get_default_volume_type())
def test_non_existent_vol_type_shouldnt_delete(self):
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.VolumeTypeNotFound,
volume_types.destroy, self.ctxt, "sfsfsdfdfs")
def test_volume_type_with_volumes_shouldnt_delete(self):
"""Ensures volume type deletion with associated volumes fail."""
type_ref = volume_types.create(self.ctxt, self.vol_type1_name)
db.volume_create(self.ctxt,
{'id': '1',
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'available',
'volume_type_id': type_ref['id']})
self.assertRaises(exception.VolumeTypeInUse,
volume_types.destroy, self.ctxt, type_ref['id'])
def test_repeated_vol_types_shouldnt_raise(self):
"""Ensures that volume duplicates don't raise."""
new_name = self.vol_type1_name + "dup"
type_ref = volume_types.create(self.ctxt, new_name)
volume_types.destroy(self.ctxt, type_ref['id'])
type_ref = volume_types.create(self.ctxt, new_name)
def test_invalid_volume_types_params(self):
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.InvalidVolumeType,
volume_types.destroy, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
volume_types.get_volume_type, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
volume_types.get_volume_type_by_name,
self.ctxt, None)
def test_volume_type_get_by_id_and_name(self):
"""Ensure volume types get returns same entry."""
volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs)
new = volume_types.get_volume_type_by_name(self.ctxt,
self.vol_type1_name)
new2 = volume_types.get_volume_type(self.ctxt, new['id'])
self.assertEqual(new, new2)
def test_volume_type_search_by_extra_spec(self):
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type3", {"key3": "another_value",
"key4": "val4"})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1"}})
self.assertEqual(1, len(vol_types))
self.assertIn("type1", vol_types.keys())
self.assertEqual({"key1": "val1", "key2": "val2"},
vol_types['type1']['extra_specs'])
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key2": "val2"}})
self.assertEqual(2, len(vol_types))
self.assertIn("type1", vol_types.keys())
self.assertIn("type2", vol_types.keys())
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key3": "val3"}})
self.assertEqual(1, len(vol_types))
self.assertIn("type2", vol_types.keys())
def test_volume_type_search_by_extra_spec_multiple(self):
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
"key3": "val3"})
volume_types.create(self.ctxt, "type3", {"key1": "val1",
"key3": "val3",
"key4": "val4"})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1",
"key3": "val3"}})
self.assertEqual(2, len(vol_types))
self.assertIn("type1", vol_types.keys())
self.assertIn("type3", vol_types.keys())
self.assertEqual({"key1": "val1", "key2": "val2", "key3": "val3"},
vol_types['type1']['extra_specs'])
self.assertEqual({"key1": "val1", "key3": "val3", "key4": "val4"},
vol_types['type3']['extra_specs'])
def test_is_encrypted(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id))
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db_api.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id))
def test_add_access(self):
project_id = '456'
vtype = volume_types.create(self.ctxt, 'type1', is_public=False)
vtype_id = vtype.get('id')
volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id)
vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id)
self.assertIn(project_id, [a.project_id for a in vtype_access])
def test_remove_access(self):
project_id = '456'
vtype = volume_types.create(self.ctxt, 'type1', projects=['456'],
is_public=False)
vtype_id = vtype.get('id')
volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id)
vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id)
self.assertNotIn(project_id, [a.project_id for a in vtype_access])
def test_get_volume_type_qos_specs(self):
qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1',
'k2': 'v2',
'k3': 'v3'})
type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2",
"key3": "val3"})
res = volume_types.get_volume_type_qos_specs(type_ref['id'])
self.assertIsNone(res['qos_specs'])
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
expected = {'qos_specs': {'id': qos_ref['id'],
'name': 'qos-specs-1',
'consumer': 'back-end',
'specs': {
'k1': 'v1',
'k2': 'v2',
'k3': 'v3'}}}
res = volume_types.get_volume_type_qos_specs(type_ref['id'])
self.assertDictMatch(expected, res)
def test_volume_types_diff(self):
# type_ref 1 and 2 have the same extra_specs, while 3 has different
keyvals1 = {"key1": "val1", "key2": "val2"}
keyvals2 = {"key1": "val0", "key2": "val2"}
type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1)
type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1)
type_ref3 = volume_types.create(self.ctxt, "type3", keyvals2)
# Check equality with only extra_specs
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertTrue(same)
self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1'])
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref3['id'])
self.assertFalse(same)
self.assertEqual(('val1', 'val0'), diff['extra_specs']['key1'])
# qos_ref 1 and 2 have the same specs, while 3 has different
qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}
qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'}
qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1)
qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1)
qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2)
# Check equality with qos specs too
qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'],
type_ref1['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'],
type_ref2['id'])
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertTrue(same)
self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1'])
self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1'])
qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'],
type_ref2['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'],
type_ref2['id'])
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertFalse(same)
self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1'])
self.assertEqual(('v1', 'v0'), diff['qos_specs']['k1'])
qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'],
type_ref2['id'])
qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'],
type_ref2['id'])
# And add encryption for good measure
enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1',
'control_location': 'front-end',
'encryption_id': 'uuid1'}
enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1',
'control_location': 'front-end',
'encryption_id': 'uuid2'}
db.volume_type_encryption_create(self.ctxt, type_ref1['id'],
enc_keyvals1)
db.volume_type_encryption_create(self.ctxt, type_ref2['id'],
enc_keyvals2)
diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'],
type_ref2['id'])
self.assertFalse(same)
self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1'])
self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1'])
self.assertEqual((256, 128), diff['encryption']['key_size'])
# Check diff equals type specs when one type is None
diff, same = volume_types.volume_types_diff(self.ctxt, None,
type_ref1['id'])
self.assertFalse(same)
self.assertEqual({'key1': (None, 'val1'), 'key2': (None, 'val2')},
diff['extra_specs'])
self.assertEqual({'consumer': (None, 'back-end'),
'k1': (None, 'v1'),
'k2': (None, 'v2'),
'k3': (None, 'v3')}, diff['qos_specs'])
self.assertEqual({'cipher': (None, 'c1'),
'control_location': (None, 'front-end'),
'deleted': (None, False),
'key_size': (None, 256),
'provider': (None, 'p1'),
'encryption_id': (None, 'uuid1')},
diff['encryption'])
def test_encryption_create(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db_api.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id))
def test_get_volume_type_encryption(self):
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
encryption = {
'control_location': 'front-end',
'provider': 'fake_provider',
}
db.volume_type_encryption_create(self.ctxt, volume_type_id,
encryption)
ret = volume_types.get_volume_type_encryption(self.ctxt,
volume_type_id)
self.assertIsNotNone(ret)
def test_get_volume_type_encryption_without_volume_type_id(self):
ret = volume_types.get_volume_type_encryption(self.ctxt, None)
self.assertIsNone(ret)
def test_check_public_volume_type_failed(self):
project_id = '456'
volume_type = volume_types.create(self.ctxt, "type1")
volume_type_id = volume_type.get('id')
self.assertRaises(exception.InvalidVolumeType,
volume_types.add_volume_type_access,
self.ctxt, volume_type_id, project_id)
self.assertRaises(exception.InvalidVolumeType,
volume_types.remove_volume_type_access,
self.ctxt, volume_type_id, project_id)
def test_check_private_volume_type(self):
volume_type = volume_types.create(self.ctxt, "type1", is_public=False)
volume_type_id = volume_type.get('id')
self.assertFalse(volume_types.is_public_volume_type(self.ctxt,
volume_type_id))
def test_ensure_no_extra_specs_for_non_admin(self):
# non-admin users shouldn't get extra-specs back in type-get/list etc
ctxt = context.RequestContext('average-joe',
'd802f078-0af1-4e6b-8c02-7fac8d4339aa',
auth_token='token',
is_admin=False)
volume_types.create(self.ctxt, "type-test", is_public=False)
vtype = volume_types.get_volume_type_by_name(ctxt, 'type-test')
self.assertIsNone(vtype.get('extra_specs', None))
def test_ensure_extra_specs_for_admin(self):
# admin users should get extra-specs back in type-get/list etc
volume_types.create(self.ctxt, "type-test", is_public=False)
vtype = volume_types.get_volume_type_by_name(self.ctxt, 'type-test')
self.assertIsNotNone(vtype.get('extra_specs', None))
# -*- coding:utf-8 -*-
"""
This module is pending deprecation as of Django 1.6 and will be removed in
version 1.8.
"""
from importlib import import_module
import json
import re
import unittest as real_unittest
import warnings
from django.apps import apps
from django.test import _doctest as doctest
from django.test import runner
from django.test.utils import compare_xml, strip_quotes
# django.utils.unittest is deprecated, but so is django.test.simple,
# and the latter will be removed before the former.
from django.utils import unittest
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
warnings.warn(
"The django.test.simple module and DjangoTestSuiteRunner are deprecated; "
"use django.test.runner.DiscoverRunner instead.",
RemovedInDjango18Warning)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"""
The entry method for doctest output checking. Defers to a sequence of
child checkers.
"""
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"""
The default comparator provided by doctest - not perfect, but good for
most purposes
"""
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
* long integers (22L) so that they equal normal integers (22).
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionflags):
try:
return compare_xml(want, got)
except Exception:
return False
def check_output_json(self, want, got, optionflags):
"""
Tries to compare want and got as if they were JSON-encoded data
"""
want, got = strip_quotes(want, got)
try:
want_json = json.loads(want)
got_json = json.loads(got)
except Exception:
return False
return want_json == got_json
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
doctestOutputChecker = OutputChecker()
def get_tests(app_config):
try:
test_module = import_module('%s.%s' % (app_config.name, TEST_MODULE))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
if not module_has_submodule(app_config.module, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def make_doctest(module):
return doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
def build_suite(app_config):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
models_module = app_config.models_module
if models_module:
if hasattr(models_module, 'suite'):
suite.addTest(models_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
models_module))
try:
suite.addTest(make_doctest(models_module))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
tests_module = get_tests(app_config)
if tests_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(tests_module, 'suite'):
suite.addTest(tests_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
tests_module))
try:
suite.addTest(make_doctest(tests_module))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
form app_label.TestClass or app_label.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
app_config = apps.get_app_config(parts[0])
models_module = app_config.models_module
tests_module = get_tests(app_config)
test_modules = []
if models_module:
test_modules.append(models_module)
if tests_module:
test_modules.append(tests_module)
TestClass = None
for module in test_modules:
TestClass = getattr(module, parts[1], None)
if TestClass is not None:
break
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestCase - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in test_modules:
try:
doctests = make_doctest(module)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
class DjangoTestSuiteRunner(runner.DiscoverRunner):
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app_config = apps.get_app_config(label)
suite.addTest(build_suite(app_config))
else:
for app_config in apps.get_app_configs():
suite.addTest(build_suite(app_config))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return runner.reorder_suite(suite, (unittest.TestCase,))
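# Hedged usage sketch (not part of Django): with this deprecated runner selected via
# ``TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'`` in settings, test labels
# passed to ``manage.py test`` are resolved through build_test() and build_suite() above,
# e.g. (the app and test names below are illustrative only):
#
#     runner = DjangoTestSuiteRunner(verbosity=1)
#     suite = runner.build_suite(['polls', 'polls.PollTests.test_was_published_recently'])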
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
GPU_TEST = "test_gpu" in sys.argv[0]
class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
def _get_distribution_strategy(self):
devices = ["/device:CPU:0", "/device:GPU:0"]
if GPU_TEST:
self.assertGreater(context.num_gpus(), 0)
if context.num_gpus() > 1:
devices = ["/device:GPU:0", "/device:GPU:1"]
print(self.id().split(".")[-1], "devices:", ", ".join(devices))
return mirrored_strategy.MirroredStrategy(devices)
def testMinimizeLossEager(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_minimize_loss_eager(self._get_distribution_strategy())
def testMinimizeLossGraph(self):
soft_placement = not GPU_TEST
print("testMinimizeLossGraph soft_placement:", soft_placement)
self._test_minimize_loss_graph(
self._get_distribution_strategy(), soft_placement=soft_placement)
def testMapReduce(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_map_reduce(self._get_distribution_strategy())
def testDeviceIndex(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_device_index(self._get_distribution_strategy())
def testTowerId(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_tower_id(self._get_distribution_strategy())
def testNumTowers(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self.assertEqual(2, self._get_distribution_strategy().num_towers)
@test_util.run_in_graph_and_eager_modes
def testCallAndMergeExceptions(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_call_and_merge_exceptions(self._get_distribution_strategy())
@test_util.run_in_graph_and_eager_modes
def testRunRegroupError(self):
def run_fn(device_id):
# Generates a list with different lengths on different devices.
# Will fail in _regroup() (if more than one device).
return list(range(device_id))
dist = self._get_distribution_strategy()
with dist.scope(), self.assertRaises(AssertionError):
dist.call_for_each_tower(run_fn, dist.worker_device_index)
@test_util.run_in_graph_and_eager_modes
def testReduceToCpu(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
def run_fn(device_id):
return device_id
dist = self._get_distribution_strategy()
with dist.scope():
result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
reduced = dist.reduce(
variable_scope.VariableAggregation.SUM,
result,
destinations="/device:CPU:0")
unwrapped = dist.unwrap(reduced)
self.assertEqual(1, len(unwrapped))
expected = sum(range(len(dist.worker_devices)))
self.assertEqual(expected, self.evaluate(unwrapped[0]))
@test_util.run_in_graph_and_eager_modes()
def testReduceToMultipleDestinations(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
devices = ["/device:GPU:0"]
if GPU_TEST:
self.assertGreater(context.num_gpus(), 0)
print(self.id().split(".")[-1], "devices:", ", ".join(devices))
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
reduced = dist.reduce(
variable_scope.VariableAggregation.SUM,
1.0,
destinations=["/device:CPU:0", "/device:GPU:0"])
unwrapped = dist.unwrap(reduced)
self.assertEqual(2, len(unwrapped))
self.assertEqual(1.0, self.evaluate(unwrapped[0]))
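# Hedged sketch (not part of the original test file): the per-tower compute followed by a
# cross-tower reduce that the tests above exercise, written as a standalone helper using
# only the contrib symbols imported at the top of this file. The device list is
# illustrative and assumes one GPU is available.
def _example_sum_across_towers():
  """Run a trivial per-tower computation and sum the results onto the CPU."""
  dist = mirrored_strategy.MirroredStrategy(["/device:CPU:0", "/device:GPU:0"])
  with dist.scope():
    per_tower = dist.call_for_each_tower(lambda: constant_op.constant(1.0))
    reduced = dist.reduce(
        variable_scope.VariableAggregation.SUM,
        per_tower,
        destinations="/device:CPU:0")
  # unwrap() returns one tensor per destination device; here there is exactly one.
  return dist.unwrap(reduced)[0]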
class MirroredStrategyVariableCreationTest(test.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
def _skip_eager_if_gpus_less_than(self, num_gpus):
if context.num_gpus() < num_gpus and context.executing_eagerly():
self.skipTest("Enough GPUs not available for this test in eager mode.")
@test_util.run_in_graph_and_eager_modes(config=config)
def testSingleVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
# This variable should be created only once across the threads because of
# special variable_creator functions used by `dist.call_for_each_tower`.
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testUnnamedVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v = variable_scope.variable(1.0)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# Default name of "Variable" will be used.
self.assertEquals("Variable:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariables(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
for i in range(5):
vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for i, v in enumerate(result):
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals("foo" + str(i) + ":0", v.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariablesWithSameCanonicalName(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
vs.append(variable_scope.variable(1.0, name="foo/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for v in result:
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals(4, len(result))
self.assertEquals("foo/bar:0", result[0].name)
self.assertEquals("foo_1/bar:0", result[1].name)
self.assertEquals("foo_1/bar_1:0", result[2].name)
self.assertEquals("foo/bar_1:0", result[3].name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableWithSameCanonicalNameAcrossThreads(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(device_id):
v = variable_scope.variable(1.0, name="foo_" + str(device_id))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# The resulting mirrored variable will use the name from the first device.
self.assertEquals("foo_0:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithLayers(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(features):
with variable_scope.variable_scope("common"):
layer1 = core.Dense(1)
layer1(features)
layer2 = core.Dense(1)
layer2(features)
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
layer3 = core.Dense(1)
layer3(features)
return [(layer1.kernel, layer1.bias),
(layer2.kernel, layer2.bias),
(layer3.kernel, layer3.bias)]
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
features = dist.distribute_dataset(
lambda: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10)
).make_one_shot_iterator().get_next()
with dist.scope():
result = dist.call_for_each_tower(
model_fn, features, run_concurrently=False)
suffixes = ["", "_1", "_2"]
for (kernel, bias), suffix in zip(result, suffixes):
self.assertIsInstance(kernel, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/kernel:0", kernel.name)
self.assertIsInstance(bias, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/bias:0", bias.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithVariableAndVariableScope(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
with variable_scope.variable_scope("common"):
v1 = variable_scope.variable(1.0, name="var1")
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
v2 = variable_scope.variable(
1.0,
name="var2",
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.variable(
1.0,
name="var3",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
v = variable_scope.variable(1.0, name="var-main0")
self.assertEquals("var-main0:0", v.name)
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEquals("var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEquals("common/var1:0", v1.name)
self.assertIsInstance(v2, values.TowerLocalVariable)
self.assertEquals("common/var2:0", v2.name)
self.assertEquals(variable_scope.VariableAggregation.SUM, v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEquals("common/var3:0", v3.name)
self.assertEquals(variable_scope.VariableAggregation.MEAN, v3.aggregation)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithGetVariableAndVariableScope(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v0 = variable_scope.get_variable("var0", [1])
with variable_scope.variable_scope("common"):
v1 = variable_scope.get_variable("var1", [1])
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
v2 = variable_scope.get_variable(
"var2", [1],
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.get_variable(
"var3", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with variable_scope.variable_scope("main"):
v = variable_scope.get_variable("var-main0", [1])
self.assertEquals("main/var-main0:0", v.name)
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEquals("main/var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEquals("main/common/var1:0", v1.name)
self.assertIsInstance(v2, values.TowerLocalVariable)
self.assertEquals("main/common/var2:0", v2.name)
self.assertEquals(variable_scope.VariableAggregation.SUM,
v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEquals("main/common/var3:0", v3.name)
self.assertEquals(variable_scope.VariableAggregation.MEAN,
v3.aggregation)
@test_util.run_in_graph_and_eager_modes(config=config)
def testNoneSynchronizationWithGetVariable(self):
self._skip_eager_if_gpus_less_than(1)
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.NONE)
@test_util.run_in_graph_and_eager_modes(config=config)
def testNoneSynchronizationWithVariable(self):
self._skip_eager_if_gpus_less_than(1)
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.NONE)
@test_util.run_in_graph_and_eager_modes(config=config)
def testInvalidSynchronizationWithVariable(self):
self._skip_eager_if_gpus_less_than(1)
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable synchronization mode: Invalid for "
"variable: v"):
variable_scope.variable(1.0, name="v", synchronization="Invalid")
@test_util.run_in_graph_and_eager_modes(config=config)
def testInvalidAggregationWithGetVariable(self):
self._skip_eager_if_gpus_less_than(1)
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
@test_util.run_in_graph_and_eager_modes(config=config)
def testInvalidAggregationWithVariable(self):
self._skip_eager_if_gpus_less_than(1)
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
@test_util.run_in_graph_and_eager_modes(config=config)
def testThreeDevices(self):
self._skip_eager_if_gpus_less_than(2)
def model_fn():
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testNonMatchingVariableCreation(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(name):
v = variable_scope.variable(1.0, name=name)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
names = values.DistributedValues({
"/device:CPU:0": "foo",
"/device:GPU:0": "bar"
})
with self.assertRaises(RuntimeError):
_ = dist.call_for_each_tower(model_fn, names, run_concurrently=False)
@test_util.run_in_graph_and_eager_modes(config=config)
def testTowerLocalVariable(self):
self._skip_eager_if_gpus_less_than(1)
all_v_sum = {}
all_v_mean = {}
components_sum = {}
components_mean = {}
def model_fn(device_id):
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v_mean = variable_scope.variable(
4.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
self.assertTrue(isinstance(v_mean, values.TowerLocalVariable))
updates = [v_sum.assign_add(2.0 + device_id),
v_mean.assign(6.0 * device_id)]
all_v_sum[device_id] = v_sum
all_v_mean[device_id] = v_mean
c_sum = v_sum.get()
c_mean = v_mean.get()
components_sum[device_id] = c_sum
components_mean[device_id] = c_mean
self.assertIsNot(v_sum, c_sum)
self.assertIsNot(v_mean, c_mean)
return updates, v_sum, v_mean, c_sum, c_mean
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
# Create "sum" and "mean" versions of TowerLocalVariables.
ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False))
# Should see the same wrapping instance in all towers.
self.assertIs(all_v_sum[0], ret_v_sum)
self.assertIs(all_v_mean[0], ret_v_mean)
self.assertIs(all_v_sum[0], all_v_sum[1])
self.assertIs(all_v_mean[0], all_v_mean[1])
# Regroup should recover the same wrapper.
self.assertIs(ret_v_sum, regrouped_sum)
self.assertIs(ret_v_mean, regrouped_mean)
self.assertIsNot(components_sum[0], components_sum[1])
self.assertIsNot(components_mean[0], components_mean[1])
# Apply updates
self.evaluate(variables.global_variables_initializer())
self.evaluate([y for x in ret_ops for y in dist.unwrap(x)])
expected_sum = 0.0
expected_mean = 0.0
for i, d in enumerate(dist.worker_devices):
# Should see different values on different devices.
v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
expected = i + 3.0
self.assertEqual(expected, v_sum_value)
expected_sum += expected
expected = i * 6.0
self.assertEqual(expected, v_mean_value)
expected_mean += expected
expected_mean /= len(dist.worker_devices)
# Without get(device), should return the value you get by
# applying the reduction across all towers (whether you use
# read_var(), get(), or nothing).
self.assertEqual(expected_sum, self.evaluate(dist.read_var(ret_v_sum)))
self.assertEqual(expected_mean, self.evaluate(dist.read_var(ret_v_mean)))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
# testing this in eager mode.
def testNameScope(self):
def model_fn():
with ops.name_scope("foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(1.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("main/foo/" + name + ":0", v0.name)
self.assertEquals("main/tower_1/foo/" + name + ":0", v1.name)
def testWithDefaultName(self):
def model_fn():
with ops.name_scope(None, "foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(2.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("foo/" + name + ":0", v0.name)
self.assertEquals("tower_1/foo/" + name + ":0", v1.name)
# variable_scope.variable() respects name scopes when creating
# variables. On the other hand variable_scope.get_variable() ignores name
# scopes when creating variables. We test both methods of creating variables
# to make sure that we have the same variable names in both cases.
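# A minimal illustration of the naming behaviour the two tests below assert
# (added for clarity, not part of the original test logic): inside
# ops.name_scope("main"), variable_scope.variable(1.0, name="b") produces a
# variable named "main/b:0", whereas variable_scope.get_variable("b", [1])
# produces "b:0", because get_variable() only honours variable scopes.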
def testNameScopeWithVariable(self):
def in_cross_tower(_):
c = variable_scope.variable(1.0, name="c")
return c
def model_fn():
b = variable_scope.variable(1.0, name="b")
with ops.name_scope("foo"):
c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
return b, c
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
a = variable_scope.variable(1.0, name="a")
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = dist.unwrap(a)
b0, b1 = dist.unwrap(result_b)
c0, c1 = dist.unwrap(result_c)
self.assertEquals("main/a:0", a0.name)
self.assertEquals("main/a/replica_1:0", a1.name)
self.assertEquals("main/b:0", b0.name)
self.assertEquals("main/b/replica_1:0", b1.name)
self.assertEquals("main/foo/c:0", c0.name)
self.assertEquals("main/foo/c/replica_1:0", c1.name)
def testNameScopeWithGetVariable(self):
def in_cross_tower(_):
c = variable_scope.get_variable("c", [1])
return c
def model_fn():
b = variable_scope.get_variable("b", [1])
with ops.name_scope("foo"):
c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
return b, c
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
a = variable_scope.get_variable("a", [1])
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = dist.unwrap(a)
b0, b1 = dist.unwrap(result_b)
c0, c1 = dist.unwrap(result_c)
self.assertEquals("a:0", a0.name)
self.assertEquals("a/replica_1:0", a1.name)
self.assertEquals("b:0", b0.name)
self.assertEquals("b/replica_1:0", b1.name)
self.assertEquals("c:0", c0.name)
self.assertEquals("c/replica_1:0", c1.name)
def testDynamicRnnVariables(self):
def model_fn():
inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
cell_fw = rnn_cell_impl.LSTMCell(300)
cell_bw = rnn_cell_impl.LSTMCell(300)
(outputs, _) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32)
return outputs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
# Two variables are created by the RNN layer.
self.assertEquals(2, len(result))
for v in result:
self.assertIsInstance(v, values.DistributedValues)
_, v1 = dist.unwrap(v)
self.assertStartsWith(v1.name, "tower_1/")
@test_util.run_in_graph_and_eager_modes(config=config)
def testTowerLocalVariableUpdate(self):
with context.graph_mode():
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
return v_sum
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1"])
def update(var, value):
return var.assign(value)
with dist.scope():
ret_v_sum = dist.call_for_each_tower(model_fn, run_concurrently=False)
update_ops = dist.unwrap(dist.update(ret_v_sum, update, 5.0))
# Initialize variables.
self.evaluate(variables.global_variables_initializer())
# Assert that the aggregated value of the tower local vars is the sum of
# the individual values before running the update ops.
self.assertEquals(1.0, self.evaluate(
ret_v_sum.get(dist._devices[0]).read_value()))
self.assertEquals(2.0, self.evaluate(ret_v_sum))
# Apply updates.
self.evaluate(update_ops)
# Assert that the aggregated value of the tower local vars is the sum of
# the individual values after running the update ops.
self.assertEquals(5.0, self.evaluate(
ret_v_sum.get(dist._devices[0]).read_value()))
self.assertEquals(10.0, self.evaluate(ret_v_sum))
class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored
# variables in tower and cross tower context.
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
def _skip_eager_if_gpus_less_than(self, num_gpus):
if context.num_gpus() < num_gpus and context.executing_eagerly():
self.skipTest("Enough GPUs not available for this test in eager mode.")
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarTowerContextWithoutAggregationType(self):
# Test that assigning to a mirrored variable in tower context raises an error
# unless an aggregation type has been set on the variable.
self._skip_eager_if_gpus_less_than(1)
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "You must specify an aggregation method to update a "
"MirroredVariable in Tower Context."):
self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarTowerContextWithSum(self):
# Test that we don't reduce a non-per-device value with the "sum"
# aggregation type.
self._skip_eager_if_gpus_less_than(1)
def var_fn():
v = variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "A non PerDevice value cannot be reduced with the given "
"aggregation."):
self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(1.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
self.assertEquals(6.0, mirrored_var_result)
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(0.5, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignAddMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(1.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign_add(6.0))
self.assertEquals(7.0, mirrored_var_result)
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignAddMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign_add(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(1.5, self.evaluate(mirrored_var))
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignSubMirroredVarCrossTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(5.0, name="foo")
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(5.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
self.assertEquals(3.0, mirrored_var_result)
@test_util.run_in_graph_and_eager_modes(config=config)
def testAssignSubMirroredVarTowerContext(self):
self._skip_eager_if_gpus_less_than(1)
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEquals(5.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
mirrored_var.dtype)
return mirrored_var.assign_sub(value)
self.evaluate(dist.unwrap(dist.call_for_each_tower(
model_fn, run_concurrently=False)))
self.assertEquals(4.5, self.evaluate(mirrored_var))
class MirroredAndTowerLocalVariableInitializerTest(test.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
def testAssignMirroredVarInitializer(self):
# This test is not eager compatible since, in eager mode, variables are
# initialized upon construction rather than when the initializer op is run.
with context.graph_mode():
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
mirrored_var = dist.call_for_each_tower(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
self.evaluate(mirrored_var.initializer)
self.assertTrue(self.evaluate(mirrored_var.is_initialized()))
def testAssignTowerLocalVarInitializer(self):
# This test is not eager compatible since, in eager mode, variables are
# initialized upon construction rather than when the initializer op is run.
with context.graph_mode():
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
return v_sum
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
tower_local_var = dist.call_for_each_tower(model_fn)
self.assertTrue(isinstance(tower_local_var, values.TowerLocalVariable))
self.assertFalse(self.evaluate(tower_local_var.is_initialized()))
self.evaluate(tower_local_var.initializer)
self.assertTrue(self.evaluate(tower_local_var.is_initialized()))
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/py.test
import binascii
import ctypes as c
import hashlib
import os
import random
import curve25519
import ecdsa
import pytest
def bytes2num(s):
res = 0
for i, b in enumerate(reversed(bytearray(s))):
res += b << (i * 8)
return res
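# Illustrative example (added; not part of the original suite):
# bytes2num(b"\x01\x00") == 256, i.e. the byte string is interpreted big-endian.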
curves = {"nist256p1": ecdsa.curves.NIST256p, "secp256k1": ecdsa.curves.SECP256k1}
class Point:
def __init__(self, name, x, y):
self.curve = name
self.x = x
self.y = y
points = [
Point(
"secp256k1",
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8,
),
Point(
"secp256k1",
0x1,
0x4218F20AE6C646B363DB68605822FB14264CA8D2587FDD6FBC750D587E76A7EE,
),
Point(
"secp256k1",
0x2,
0x66FBE727B2BA09E09F5A98D70A5EFCE8424C5FA425BBDA1C511F860657B8535E,
),
Point(
"secp256k1",
0x1B,
0x1ADCEA1CF831B0AD1653E769D1A229091D0CC68D4B0328691B9CAACC76E37C90,
),
Point(
"nist256p1",
0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,
0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5,
),
Point(
"nist256p1",
0x0,
0x66485C780E2F83D72433BD5D84A06BB6541C2AF31DAE871728BF856A174F93F4,
),
Point(
"nist256p1",
0x0,
0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B,
),
Point(
"nist256p1",
0xAF8BBDFE8CDD5577ACBF345B543D28CF402F4E94D3865B97EA0787F2D3AA5D22,
0x35802B8B376B995265918B078BC109C21A535176585C40F519ACA52D6AFC147C,
),
Point(
"nist256p1",
0x80000,
0x580610071F440F0DCC14A22E2D5D5AFC1224C0CD11A3B4B51B8ECD2224EE1CE2,
),
]
random_iters = int(os.environ.get("ITERS", 1))
DIR = os.path.abspath(os.path.dirname(__file__))
lib = c.cdll.LoadLibrary(os.path.join(DIR, "libtrezor-crypto.so"))
class curve_info(c.Structure):
_fields_ = [("bip32_name", c.c_char_p), ("params", c.c_void_p)]
lib.get_curve_by_name.restype = c.POINTER(curve_info)
BIGNUM = c.c_uint32 * 9
class Random(random.Random):
def randbytes(self, n):
buf = (c.c_uint8 * n)()
for i in range(n):
buf[i] = self.randrange(0, 256)
return buf
def randpoint(self, curve):
k = self.randrange(0, curve.order)
return k * curve.generator
def int2bn(x, bn_type=BIGNUM):
b = bn_type()
b._int = x
for i in range(len(b)):
b[i] = x % (1 << 30)
x = x >> 30
return b
def bn2int(b):
x = 0
for i in range(len(b)):
x += b[i] << (30 * i)
return x
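# Note added for clarity: BIGNUM (nine 32-bit words) matches the limb layout
# used by the library's bn_* routines, with 30 bits of the value per limb, as
# packed by int2bn() and unpacked by bn2int() above. For example,
# int2bn(1 << 30) stores 0 in limb 0 and 1 in limb 1, and bn2int() recovers
# 2**30 from it.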
@pytest.fixture(params=range(random_iters))
def r(request):
seed = request.param
return Random(seed + int(os.environ.get("SEED", 0)))
@pytest.fixture(params=list(sorted(curves)))
def curve(request):
name = request.param
curve_ptr = lib.get_curve_by_name(bytes(name, "ascii")).contents.params
assert curve_ptr, "curve {} not found".format(name)
curve_obj = curves[name]
curve_obj.ptr = c.c_void_p(curve_ptr)
curve_obj.p = curve_obj.curve.p() # shorthand
return curve_obj
@pytest.fixture(params=points)
def point(request):
name = request.param.curve
curve_ptr = lib.get_curve_by_name(bytes(name, "ascii")).contents.params
assert curve_ptr, "curve {} not found".format(name)
curve_obj = curves[name]
curve_obj.ptr = c.c_void_p(curve_ptr)
curve_obj.p = ecdsa.ellipticcurve.Point(
curve_obj.curve, request.param.x, request.param.y
)
return curve_obj
def test_inverse(curve, r):
x = r.randrange(1, curve.p)
y = int2bn(x)
lib.bn_inverse(y, int2bn(curve.p))
y = bn2int(y)
y_ = ecdsa.numbertheory.inverse_mod(x, curve.p)
assert y == y_
def test_is_less(curve, r):
x = r.randrange(0, curve.p)
y = r.randrange(0, curve.p)
x_ = int2bn(x)
y_ = int2bn(y)
res = lib.bn_is_less(x_, y_)
assert res == (x < y)
res = lib.bn_is_less(y_, x_)
assert res == (y < x)
def test_is_equal(curve, r):
x = r.randrange(0, curve.p)
y = r.randrange(0, curve.p)
x_ = int2bn(x)
y_ = int2bn(y)
assert lib.bn_is_equal(x_, y_) == (x == y)
assert lib.bn_is_equal(x_, x_) == 1
assert lib.bn_is_equal(y_, y_) == 1
def test_is_zero(curve, r):
x = r.randrange(0, curve.p)
assert lib.bn_is_zero(int2bn(x)) == (not x)
def test_simple_comparisons():
assert lib.bn_is_zero(int2bn(0)) == 1
assert lib.bn_is_zero(int2bn(1)) == 0
assert lib.bn_is_less(int2bn(0), int2bn(0)) == 0
assert lib.bn_is_less(int2bn(1), int2bn(0)) == 0
assert lib.bn_is_less(int2bn(0), int2bn(1)) == 1
assert lib.bn_is_equal(int2bn(0), int2bn(0)) == 1
assert lib.bn_is_equal(int2bn(1), int2bn(0)) == 0
assert lib.bn_is_equal(int2bn(0), int2bn(1)) == 0
def test_mult_half(curve, r):
x = r.randrange(0, 2 * curve.p)
y = int2bn(x)
lib.bn_mult_half(y, int2bn(curve.p))
y = bn2int(y)
if y >= curve.p:
y -= curve.p
half = ecdsa.numbertheory.inverse_mod(2, curve.p)
assert y == (x * half) % curve.p
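# In other words (comment added): bn_mult_half(y, p) halves y modulo p, i.e.
# multiplies it by the modular inverse of 2, which is exactly the reference
# value computed above with ecdsa.numbertheory.inverse_mod(2, curve.p).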
def test_subtractmod(curve, r):
x = r.randrange(0, 2 ** 256)
y = r.randrange(0, 2 ** 256)
z = int2bn(0)
lib.bn_subtractmod(int2bn(x), int2bn(y), z, int2bn(curve.p))
z = bn2int(z)
z_ = x + 2 * curve.p - y
assert z == z_
def test_subtract2(r):
x = r.randrange(0, 2 ** 256)
y = r.randrange(0, 2 ** 256)
x, y = max(x, y), min(x, y)
z = int2bn(0)
lib.bn_subtract(int2bn(x), int2bn(y), z)
z = bn2int(z)
z_ = x - y
assert z == z_
def test_add(curve, r):
x = r.randrange(0, 2 ** 256)
y = r.randrange(0, 2 ** 256)
z_ = x + y
z = int2bn(x)
lib.bn_add(z, int2bn(y))
z = bn2int(z)
assert z == z_
def test_addmod(curve, r):
x = r.randrange(0, 2 ** 256)
y = r.randrange(0, 2 ** 256)
z_ = (x + y) % curve.p
z = int2bn(x)
lib.bn_addmod(z, int2bn(y), int2bn(curve.p))
z = bn2int(z)
if z >= curve.p:
z = z - curve.p
assert z == z_
def test_multiply(curve, r):
k = r.randrange(0, 2 * curve.p)
x = r.randrange(0, 2 * curve.p)
z = (k * x) % curve.p
k = int2bn(k)
z_ = int2bn(x)
p_ = int2bn(curve.p)
lib.bn_multiply(k, z_, p_)
z_ = bn2int(z_)
assert z_ < 2 * curve.p
if z_ >= curve.p:
z_ = z_ - curve.p
assert z_ == z
def test_multiply1(curve, r):
k = r.randrange(0, 2 * curve.p)
x = r.randrange(0, 2 * curve.p)
kx = k * x
res = int2bn(0, bn_type=(c.c_uint32 * 18))
lib.bn_multiply_long(int2bn(k), int2bn(x), res)
res = bn2int(res)
assert res == kx
def test_multiply2(curve, r):
x = int2bn(0)
s = r.randrange(0, 2 ** 526)
res = int2bn(s, bn_type=(c.c_uint32 * 18))
prime = int2bn(curve.p)
lib.bn_multiply_reduce(x, res, prime)
x = bn2int(x) % curve.p
x_ = s % curve.p
assert x == x_
def test_fast_mod(curve, r):
x = r.randrange(0, 128 * curve.p)
y = int2bn(x)
lib.bn_fast_mod(y, int2bn(curve.p))
y = bn2int(y)
assert y < 2 * curve.p
if y >= curve.p:
y -= curve.p
assert x % curve.p == y
def test_mod(curve, r):
x = r.randrange(0, 2 * curve.p)
y = int2bn(x)
lib.bn_mod(y, int2bn(curve.p))
assert bn2int(y) == x % curve.p
def test_mod_specific(curve):
p = curve.p
for x in [0, 1, 2, p - 2, p - 1, p, p + 1, p + 2, 2 * p - 2, 2 * p - 1]:
y = int2bn(x)
lib.bn_mod(y, int2bn(curve.p))
assert bn2int(y) == x % p
POINT = BIGNUM * 2
def to_POINT(p):
return POINT(int2bn(p.x()), int2bn(p.y()))
def from_POINT(p):
return (bn2int(p[0]), bn2int(p[1]))
JACOBIAN = BIGNUM * 3
def to_JACOBIAN(jp):
return JACOBIAN(int2bn(jp[0]), int2bn(jp[1]), int2bn(jp[2]))
def from_JACOBIAN(p):
return (bn2int(p[0]), bn2int(p[1]), bn2int(p[2]))
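# Background note (added): a Jacobian triple (X, Y, Z) represents the affine
# point (X / Z**2, Y / Z**3); this is why test_point_to_jacobian below checks
# jx % p == x * jz**2 % p and jy % p == y * jz**3 % p before converting back.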
def test_point_multiply(curve, r):
p = r.randpoint(curve)
k = r.randrange(0, 2 ** 256)
kp = k * p
res = POINT(int2bn(0), int2bn(0))
lib.point_multiply(curve.ptr, int2bn(k), to_POINT(p), res)
res = from_POINT(res)
assert res == (kp.x(), kp.y())
def test_point_add(curve, r):
p1 = r.randpoint(curve)
p2 = r.randpoint(curve)
q = p1 + p2
q1 = to_POINT(p1)
q2 = to_POINT(p2)
lib.point_add(curve.ptr, q1, q2)
q_ = from_POINT(q2)
assert q_ == (q.x(), q.y())
def test_point_double(curve, r):
p = r.randpoint(curve)
q = p.double()
q_ = to_POINT(p)
lib.point_double(curve.ptr, q_)
q_ = from_POINT(q_)
assert q_ == (q.x(), q.y())
def test_point_to_jacobian(curve, r):
p = r.randpoint(curve)
jp = JACOBIAN()
lib.curve_to_jacobian(to_POINT(p), jp, int2bn(curve.p))
jx, jy, jz = from_JACOBIAN(jp)
assert jx % curve.p == (p.x() * jz ** 2) % curve.p
assert jy % curve.p == (p.y() * jz ** 3) % curve.p
q = POINT()
lib.jacobian_to_curve(jp, q, int2bn(curve.p))
q = from_POINT(q)
assert q == (p.x(), p.y())
def test_cond_negate(curve, r):
x = r.randrange(0, curve.p)
a = int2bn(x)
lib.conditional_negate(0, a, int2bn(curve.p))
assert bn2int(a) == x
lib.conditional_negate(-1, a, int2bn(curve.p))
assert bn2int(a) == 2 * curve.p - x
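# Comment added for clarity: conditional_negate(cond, a, p) leaves `a`
# unchanged when cond == 0 and replaces it with 2*p - a (i.e. -a modulo p,
# kept below 2*p) when every bit of cond is set, as the two assertions above
# demonstrate.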
def test_jacobian_add(curve, r):
p1 = r.randpoint(curve)
p2 = r.randpoint(curve)
prime = int2bn(curve.p)
q = POINT()
jp2 = JACOBIAN()
lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
lib.jacobian_to_curve(jp2, q, prime)
q = from_POINT(q)
p_ = p1 + p2
assert (p_.x(), p_.y()) == q
def test_jacobian_add_double(curve, r):
p1 = r.randpoint(curve)
p2 = p1
prime = int2bn(curve.p)
q = POINT()
jp2 = JACOBIAN()
lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
lib.jacobian_to_curve(jp2, q, prime)
q = from_POINT(q)
p_ = p1 + p2
assert (p_.x(), p_.y()) == q
def test_jacobian_double(curve, r):
p = r.randpoint(curve)
p2 = p.double()
prime = int2bn(curve.p)
q = POINT()
jp = JACOBIAN()
lib.curve_to_jacobian(to_POINT(p), jp, prime)
lib.point_jacobian_double(jp, curve.ptr)
lib.jacobian_to_curve(jp, q, prime)
q = from_POINT(q)
assert (p2.x(), p2.y()) == q
def sigdecode(sig, _):
return map(bytes2num, [sig[:32], sig[32:]])
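# Comment added for clarity: sigdecode() splits the 64-byte raw signature
# produced by ecdsa_sign_digest into big-endian (r, s) integers, which is the
# form python-ecdsa expects a custom sigdecode callable to return in
# verify_digest() below.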
def test_sign(curve, r):
priv = r.randbytes(32)
digest = r.randbytes(32)
sig = r.randbytes(64)
lib.ecdsa_sign_digest(curve.ptr, priv, digest, sig, c.c_void_p(0), c.c_void_p(0))
exp = bytes2num(priv)
sk = ecdsa.SigningKey.from_secret_exponent(exp, curve, hashfunc=hashlib.sha256)
vk = sk.get_verifying_key()
sig_ref = sk.sign_digest_deterministic(
digest, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string_canonize
)
assert binascii.hexlify(sig) == binascii.hexlify(sig_ref)
assert vk.verify_digest(sig, digest, sigdecode)
def test_validate_pubkey(curve, r):
p = r.randpoint(curve)
assert lib.ecdsa_validate_pubkey(curve.ptr, to_POINT(p))
def test_validate_pubkey_direct(point):
assert lib.ecdsa_validate_pubkey(point.ptr, to_POINT(point.p))
def test_curve25519(r):
sec1 = bytes(bytearray(r.randbytes(32)))
sec2 = bytes(bytearray(r.randbytes(32)))
pub1 = curve25519.Private(sec1).get_public()
pub2 = curve25519.Private(sec2).get_public()
session1 = r.randbytes(32)
lib.curve25519_scalarmult(session1, sec2, pub1.public)
session2 = r.randbytes(32)
lib.curve25519_scalarmult(session2, sec1, pub2.public)
assert bytearray(session1) == bytearray(session2)
shared1 = curve25519.Private(sec2).get_shared_key(pub1, hashfunc=lambda x: x)
shared2 = curve25519.Private(sec1).get_shared_key(pub2, hashfunc=lambda x: x)
assert shared1 == shared2
assert bytearray(session1) == shared1
assert bytearray(session2) == shared2
def test_curve25519_pubkey(r):
sec = bytes(bytearray(r.randbytes(32)))
pub = curve25519.Private(sec).get_public()
res = r.randbytes(32)
lib.curve25519_scalarmult_basepoint(res, sec)
assert bytearray(res) == pub.public
def test_curve25519_scalarmult_from_gpg(r):
sec = binascii.unhexlify(
"4a1e76f133afb29dbc7860bcbc16d0e829009cc15c2f81ed26de1179b1d9c938"
)
pub = binascii.unhexlify(
"5d6fc75c016e85b17f54e0128a216d5f9229f25bac1ec85cecab8daf48621b31"
)
res = r.randbytes(32)
lib.curve25519_scalarmult(res, sec[::-1], pub[::-1])
expected = "a93dbdb23e5c99da743e203bd391af79f2b83fb8d0fd6ec813371c71f08f2d4d"
assert binascii.hexlify(bytearray(res)) == bytes(expected, "ascii")
|
|
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("Cases",
"CaseCategories",
"CaseSubCategories",
"CaseBasename",)
CaseSetname = "websocket"
CaseBasename = "Case"
##
## To add new cases
##
## 1) create a class in subdir "case" (derived from Case, and appropriately named)
## 2) import the class here (see below)
## 3) add class to Cases list (see below)
##
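## For example (illustrative sketch only): a ninth "Text Messages" framing
## case would live in subdir "case" as case1_1_9.py containing
## `class Case1_1_9(Case)`, be imported below next to the other case1_1_*
## modules, and be appended to the Cases list at the bottom of this file.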
##
## Case classes are named "CaseX_Y_Z" where X is from these test case categories:
##
CaseCategories = {"0": "Handshake",
"1": "Framing",
"2": "Pings/Pongs",
"3": "Reserved Bits",
"4": "Opcodes",
"5": "Fragmentation",
"6": "UTF-8 Handling",
"7": "Close Handling",
"8": "Misc",
"9": "Limits/Performance",
"10": "Misc",
"12": "WebSocket Compression"}
CaseSubCategories = {"1.1": "Text Messages",
"1.2": "Binary Messages",
"4.1": "Non-control Opcodes",
"4.2": "Control Opcodes",
"6.1": "Valid UTF-8 with zero payload fragments",
"6.2": "Valid UTF-8 unfragmented, fragmented on code-points and within code-points",
"6.3": "Invalid UTF-8 differently fragmented",
"6.4": "Fail-fast on invalid UTF-8",
"7.1": "Basic close behavior (fuzzer initiated)",
# "7.2": "Basic close behavior (peer initiated)",
"7.3": "Close frame structure: payload length (fuzzer initiated)",
# "7.4": "Close frame structure: payload length (peer initiated)",
"7.5": "Close frame structure: payload value (fuzzer initiated)",
# "7.6": "Close frame structure: payload value (peer initiated)",
"7.7": "Close frame structure: valid close codes (fuzzer initiated)",
# "7.8": "Close frame structure: valid close codes (peer initiated)",
"7.9": "Close frame structure: invalid close codes (fuzzer initiated)",
# "7.10": "Close frame structure: invalid close codes (peer initiated)",
# "7.11": "Peer initiated timeouts",
"7.13": "Informational close information (fuzzer initiated)",
"9.1": "Text Message (increasing size)",
"9.2": "Binary Message (increasing size)",
"9.3": "Fragmented Text Message (fixed size, increasing fragment size)",
"9.4": "Fragmented Binary Message (fixed size, increasing fragment size)",
"9.5": "Text Message (fixed size, increasing chop size)",
"9.6": "Binary Text Message (fixed size, increasing chop size)",
"9.7": "Text Message Roundtrip Time (fixed number, increasing size)",
"9.8": "Binary Message Roundtrip Time (fixed number, increasing size)",
"9.9": "Text Message (unlimited size)",
"9.10": "Binary Message (unlimited size)",
"10.1": "Auto-Fragmentation"
}
##
## Cases
##
from case1_1_1 import *
from case1_1_2 import *
from case1_1_3 import *
from case1_1_4 import *
from case1_1_5 import *
from case1_1_6 import *
from case1_1_7 import *
from case1_1_8 import *
from case1_2_1 import *
from case1_2_2 import *
from case1_2_3 import *
from case1_2_4 import *
from case1_2_5 import *
from case1_2_6 import *
from case1_2_7 import *
from case1_2_8 import *
from case2_1 import *
from case2_2 import *
from case2_3 import *
from case2_4 import *
from case2_5 import *
from case2_6 import *
from case2_7 import *
from case2_8 import *
from case2_9 import *
from case2_10 import *
from case2_11 import *
from case3_1 import *
from case3_2 import *
from case3_3 import *
from case3_4 import *
from case3_5 import *
from case3_6 import *
from case3_7 import *
from case4_1_1 import *
from case4_1_2 import *
from case4_1_3 import *
from case4_1_4 import *
from case4_1_5 import *
from case4_2_1 import *
from case4_2_2 import *
from case4_2_3 import *
from case4_2_4 import *
from case4_2_5 import *
from case5_1 import *
from case5_2 import *
from case5_3 import *
from case5_4 import *
from case5_5 import *
from case5_6 import *
from case5_7 import *
from case5_8 import *
from case5_9 import *
from case5_10 import *
from case5_11 import *
from case5_12 import *
from case5_13 import *
from case5_14 import *
from case5_15 import *
from case5_16 import *
from case5_17 import *
from case5_18 import *
from case5_19 import *
from case5_20 import *
from case6_1_1 import *
from case6_1_2 import *
from case6_1_3 import *
from case6_2_1 import *
from case6_2_2 import *
from case6_2_3 import *
from case6_2_4 import *
from case6_3_1 import *
from case6_3_2 import *
from case6_4_1 import *
from case6_4_2 import *
from case6_4_3 import *
from case6_4_4 import *
from case6_x_x import *
from case7_1_1 import *
from case7_1_2 import *
from case7_1_3 import *
from case7_1_4 import *
from case7_1_5 import *
from case7_1_6 import *
from case7_3_1 import *
from case7_3_2 import *
from case7_3_3 import *
from case7_3_4 import *
from case7_3_5 import *
from case7_3_6 import *
from case7_5_1 import *
from case7_7_X import *
from case7_9_X import *
from case7_13_1 import *
from case7_13_2 import *
from case9_1_1 import *
from case9_1_2 import *
from case9_1_3 import *
from case9_1_4 import *
from case9_1_5 import *
from case9_1_6 import *
from case9_2_1 import *
from case9_2_2 import *
from case9_2_3 import *
from case9_2_4 import *
from case9_2_5 import *
from case9_2_6 import *
from case9_3_1 import *
from case9_3_2 import *
from case9_3_3 import *
from case9_3_4 import *
from case9_3_5 import *
from case9_3_6 import *
from case9_3_7 import *
from case9_3_8 import *
from case9_3_9 import *
from case9_4_1 import *
from case9_4_2 import *
from case9_4_3 import *
from case9_4_4 import *
from case9_4_5 import *
from case9_4_6 import *
from case9_4_7 import *
from case9_4_8 import *
from case9_4_9 import *
from case9_5_1 import *
from case9_5_2 import *
from case9_5_3 import *
from case9_5_4 import *
from case9_5_5 import *
from case9_5_6 import *
from case9_6_1 import *
from case9_6_2 import *
from case9_6_3 import *
from case9_6_4 import *
from case9_6_5 import *
from case9_6_6 import *
from case9_7_X import *
from case9_9_1 import *
from case10_1_1 import *
##
## This is the list of Case classes that will be run by the fuzzing server/client
##
Cases = []
Cases += [Case1_1_1, Case1_1_2, Case1_1_3, Case1_1_4, Case1_1_5, Case1_1_6, Case1_1_7, Case1_1_8]
Cases += [Case1_2_1, Case1_2_2, Case1_2_3, Case1_2_4, Case1_2_5, Case1_2_6, Case1_2_7, Case1_2_8]
Cases += [Case2_1, Case2_2, Case2_3, Case2_4, Case2_5, Case2_6, Case2_7, Case2_8, Case2_9, Case2_10, Case2_11]
Cases += [Case3_1, Case3_2, Case3_3, Case3_4, Case3_5, Case3_6, Case3_7]
Cases += [Case4_1_1, Case4_1_2, Case4_1_3, Case4_1_4, Case4_1_5]
Cases += [Case4_2_1, Case4_2_2, Case4_2_3, Case4_2_4, Case4_2_5]
Cases += [Case5_1, Case5_2, Case5_3, Case5_4, Case5_5, Case5_6, Case5_7, Case5_8, Case5_9, Case5_10, Case5_11, Case5_12, Case5_13, Case5_14, Case5_15, Case5_16, Case5_17, Case5_18, Case5_19, Case5_20]
Cases += [Case6_1_1, Case6_1_2, Case6_1_3]
Cases += [Case6_2_1, Case6_2_2, Case6_2_3, Case6_2_4]
Cases += [Case6_3_1, Case6_3_2]
Cases += [Case6_4_1, Case6_4_2, Case6_4_3, Case6_4_4]
Cases.extend(Case6_X_X)
CaseSubCategories.update(Case6_X_X_CaseSubCategories)
Cases += [Case7_1_1, Case7_1_2, Case7_1_3, Case7_1_4, Case7_1_5, Case7_1_6]
Cases += [Case7_3_1, Case7_3_2, Case7_3_3, Case7_3_4, Case7_3_5, Case7_3_6]
Cases += [Case7_5_1]
Cases.extend(Case7_7_X)
Cases.extend(Case7_9_X)
Cases += [Case7_13_1, Case7_13_2]
Cases += [Case9_1_1, Case9_1_2, Case9_1_3, Case9_1_4, Case9_1_5, Case9_1_6]
Cases += [Case9_2_1, Case9_2_2, Case9_2_3, Case9_2_4, Case9_2_5, Case9_2_6]
Cases += [Case9_3_1, Case9_3_2, Case9_3_3, Case9_3_4, Case9_3_5, Case9_3_6, Case9_3_7, Case9_3_8, Case9_3_9]
Cases += [Case9_4_1, Case9_4_2, Case9_4_3, Case9_4_4, Case9_4_5, Case9_4_6, Case9_4_7, Case9_4_8, Case9_4_9]
Cases += [Case9_5_1, Case9_5_2, Case9_5_3, Case9_5_4, Case9_5_5, Case9_5_6]
Cases += [Case9_6_1, Case9_6_2, Case9_6_3, Case9_6_4, Case9_6_5, Case9_6_6]
# this produces cases 9.7.X and 9.8.X ... all come from one file (Case9_7_X); it's a bit hacky, OK.
Cases.extend(Case9_7_X)
Cases.extend(Case9_8_X)
#Cases += [Case9_9_1]
Cases += [Case10_1_1]
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Provides a command container for additional tox commands, used in "tox.ini".
COMMANDS:
* copytree
* copy
* mkdir
* py2to3
REQUIRES:
* argparse
"""
from glob import glob
import argparse
import inspect
import os.path
import shutil
import sys
__author__ = "Jens Engel"
__copyright__ = "(c) 2013 by Jens Engel"
__license__ = "BSD"
# -----------------------------------------------------------------------------
# CONSTANTS:
# -----------------------------------------------------------------------------
VERSION = "0.1.0"
FORMATTER_CLASS = argparse.RawDescriptionHelpFormatter
# -----------------------------------------------------------------------------
# SUBCOMMAND: copytree
# -----------------------------------------------------------------------------
def command_copytree(args):
"""
Copy one or more source directories below a destination directory.
Parts of the destination directory path are created if needed.
Similar to the UNIX command: 'cp -R srcdir destdir'
"""
for srcdir in args.srcdirs:
basename = os.path.basename(srcdir)
destdir2 = os.path.normpath(os.path.join(args.destdir, basename))
if os.path.exists(destdir2):
shutil.rmtree(destdir2)
sys.stdout.write("copytree: %s => %s\n" % (srcdir, destdir2))
shutil.copytree(srcdir, destdir2)
return 0
def setup_parser_copytree(parser):
parser.add_argument("srcdirs", nargs="+", help="Source directory(s)")
parser.add_argument("destdir", help="Destination directory")
command_copytree.usage = "%(prog)s srcdir... destdir"
command_copytree.short = "Copy source dir(s) below a destination directory."
command_copytree.setup_parser = setup_parser_copytree
# -----------------------------------------------------------------------------
# SUBCOMMAND: copy
# -----------------------------------------------------------------------------
def command_copy(args):
"""
Copy one or more source file(s) to a destpath (destfile or destdir).
Destdir mode is used if:
* More than one srcfile is provided
* Last parameter ends with a slash ("/").
* Last parameter is an existing directory
Destination directory path is created if needed.
Similar to the UNIX command: 'cp srcfile... destpath'
"""
sources = args.sources
destpath = args.destpath
source_files = []
for file_ in sources:
if "*" in file_:
selected = glob(file_)
source_files.extend(selected)
elif os.path.isfile(file_):
source_files.append(file_)
if destpath.endswith("/") or os.path.isdir(destpath) or len(sources) > 1:
# -- DESTDIR-MODE: Last argument is a directory.
destdir = destpath
else:
# -- DESTFILE-MODE: Copy (and rename) one file.
assert len(source_files) == 1
destdir = os.path.dirname(destpath)
# -- WORK-HORSE: Copy one or more files to destpath.
if not os.path.isdir(destdir):
sys.stdout.write("copy: Create dir %s\n" % destdir)
os.makedirs(destdir)
for source in source_files:
destname = os.path.join(destdir, os.path.basename(source))
sys.stdout.write("copy: %s => %s\n" % (source, destname))
shutil.copy(source, destname)
return 0
def setup_parser_copy(parser):
parser.add_argument("sources", nargs="+", help="Source files.")
parser.add_argument("destpath", help="Destination path")
command_copy.usage = "%(prog)s sources... destpath"
command_copy.short = "Copy one or more source files to a destinition."
command_copy.setup_parser = setup_parser_copy
# -----------------------------------------------------------------------------
# SUBCOMMAND: mkdir
# -----------------------------------------------------------------------------
def command_mkdir(args):
"""
Create a non-existing directory (or more ...).
If the directory exists, the step is skipped.
Similar to the UNIX command: 'mkdir -p dir'
"""
errors = 0
for directory in args.dirs:
if os.path.exists(directory):
if not os.path.isdir(directory):
# -- SANITY CHECK: directory exists, but as file...
sys.stdout.write("mkdir: %s\n" % directory)
sys.stdout.write("ERROR: Exists already, but as file...\n")
errors += 1
else:
# -- NORMAL CASE: Directory does not exist yet.
assert not os.path.isdir(directory)
sys.stdout.write("mkdir: %s\n" % directory)
os.makedirs(directory)
return errors
def setup_parser_mkdir(parser):
parser.add_argument("dirs", nargs="+", help="Directory(s)")
command_mkdir.usage = "%(prog)s dir..."
command_mkdir.short = "Create non-existing directory (or more...)."
command_mkdir.setup_parser = setup_parser_mkdir
# -----------------------------------------------------------------------------
# SUBCOMMAND: py2to3
# -----------------------------------------------------------------------------
def command_py2to3(args):
"""
Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources.
"""
from lib2to3.main import main
sys.exit(main("lib2to3.fixes", args=args.sources))
def setup_parser4py2to3(parser):
parser.add_argument("sources", nargs="+", help="Source files.")
command_py2to3.name = "2to3"
command_py2to3.usage = "%(prog)s sources..."
command_py2to3.short = "Apply python's 2to3 tool to Python sources."
command_py2to3.setup_parser = setup_parser4py2to3
# -----------------------------------------------------------------------------
# COMMAND HELPERS/UTILS:
# -----------------------------------------------------------------------------
def discover_commands():
commands = []
for name, func in inspect.getmembers(inspect.getmodule(toxcmd_main)):
if name.startswith("__"):
continue
if name.startswith("command_") and callable(func):
command_name0 = name.replace("command_", "")
command_name = getattr(func, "name", command_name0)
commands.append(Command(command_name, func))
return commands
class Command(object):
def __init__(self, name, func):
assert isinstance(name, basestring)
assert callable(func)
self.name = name
self.func = func
self.parser = None
def setup_parser(self, command_parser):
setup_parser = getattr(self.func, "setup_parser", None)
if setup_parser and callable(setup_parser):
setup_parser(command_parser)
else:
command_parser.add_argument("args", nargs="*")
@property
def usage(self):
usage = getattr(self.func, "usage", None)
return usage
@property
def short_description(self):
short_description = getattr(self.func, "short", "")
return short_description
@property
def description(self):
return inspect.getdoc(self.func)
def __call__(self, args):
return self.func(args)
# -----------------------------------------------------------------------------
# MAIN-COMMAND:
# -----------------------------------------------------------------------------
def toxcmd_main(args=None):
"""Command util with subcommands for tox environments."""
usage = "USAGE: %(prog)s [OPTIONS] COMMAND args..."
if args is None:
args = sys.argv[1:]
# -- STEP: Build command-line parser.
parser = argparse.ArgumentParser(description=inspect.getdoc(toxcmd_main),
formatter_class=FORMATTER_CLASS)
common_parser = parser.add_argument_group("Common options")
common_parser.add_argument("--version", action="version", version=VERSION)
subparsers = parser.add_subparsers(help="commands")
for command in discover_commands():
command_parser = subparsers.add_parser(command.name,
usage=command.usage,
description=command.description,
help=command.short_description,
formatter_class=FORMATTER_CLASS)
command_parser.set_defaults(func=command)
command.setup_parser(command_parser)
command.parser = command_parser
# -- STEP: Process command-line and run command.
options = parser.parse_args(args)
command_function = options.func
return command_function(options)
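# Example invocation (paths and the module filename "toxcmd.py" are assumed
# for illustration):
#
#   python toxcmd.py copytree docs/_build build/site
#   python toxcmd.py copy README.rst CHANGES.rst build/
#   python toxcmd.py mkdir build/reports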
# -----------------------------------------------------------------------------
# MAIN:
# -----------------------------------------------------------------------------
if __name__ == "__main__":
sys.exit(toxcmd_main())
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
import six
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.tests import unit
CONF = keystone.conf.CONF
class ExceptionTestCase(unit.BaseTestCase):
def assertValidJsonRendering(self, e):
resp = wsgi.render_exception(e)
self.assertEqual(e.code, resp.status_int)
self.assertEqual('%s %s' % (e.code, e.title), resp.status)
j = jsonutils.loads(resp.body)
self.assertIsNotNone(j.get('error'))
self.assertIsNotNone(j['error'].get('code'))
self.assertIsNotNone(j['error'].get('title'))
self.assertIsNotNone(j['error'].get('message'))
self.assertNotIn('\n', j['error']['message'])
self.assertNotIn('  ', j['error']['message'])  # no doubled spaces
self.assertTrue(type(j['error']['code']) is int)
def test_all_json_renderings(self):
"""Everything callable in the exception module should be renderable.
... except for the base error class (exception.Error), which is not
user-facing.
This test provides a custom message to bypass docstring parsing, which
should be tested separately.
"""
for cls in [x for x in exception.__dict__.values() if callable(x)]:
if cls is not exception.Error and isinstance(cls, exception.Error):
self.assertValidJsonRendering(cls(message='Overridden.'))
def test_validation_error(self):
target = uuid.uuid4().hex
attribute = uuid.uuid4().hex
e = exception.ValidationError(target=target, attribute=attribute)
self.assertValidJsonRendering(e)
self.assertIn(target, six.text_type(e))
self.assertIn(attribute, six.text_type(e))
def test_not_found(self):
target = uuid.uuid4().hex
e = exception.NotFound(target=target)
self.assertValidJsonRendering(e)
self.assertIn(target, six.text_type(e))
def test_forbidden_title(self):
e = exception.Forbidden()
resp = wsgi.render_exception(e)
j = jsonutils.loads(resp.body)
self.assertEqual('Forbidden', e.title)
self.assertEqual('Forbidden', j['error'].get('title'))
def test_unicode_message(self):
message = u'Comment \xe7a va'
e = exception.Error(message)
try:
self.assertEqual(message, six.text_type(e))
except UnicodeEncodeError:
self.fail("unicode error message not supported")
def test_unicode_string(self):
e = exception.ValidationError(attribute='xx',
target='Long \xe2\x80\x93 Dash')
if six.PY2:
self.assertIn(u'\u2013', six.text_type(e))
else:
self.assertIn('Long \xe2\x80\x93 Dash', six.text_type(e))
def test_invalid_unicode_string(self):
# NOTE(jamielennox): This is a complete failure case, so what is returned in
# the exception message is not that important, so long as there is an error
# with a message.
e = exception.ValidationError(attribute='xx',
target='\xe7a va')
if six.PY2:
self.assertIn('%(attribute)', six.text_type(e))
else:
# There's no UnicodeDecodeError on python 3.
self.assertIn('\xe7a va', six.text_type(e))
class UnexpectedExceptionTestCase(ExceptionTestCase):
"""Test if internal info is exposed to the API user on UnexpectedError."""
class SubClassExc(exception.UnexpectedError):
debug_message_format = 'Debug Message: %(debug_info)s'
def setUp(self):
super(UnexpectedExceptionTestCase, self).setUp()
self.exc_str = uuid.uuid4().hex
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
def test_unexpected_error_no_debug(self):
self.config_fixture.config(debug=False)
e = exception.UnexpectedError(exception=self.exc_str)
self.assertNotIn(self.exc_str, six.text_type(e))
def test_unexpected_error_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
e = exception.UnexpectedError(exception=self.exc_str)
self.assertIn(self.exc_str, six.text_type(e))
def test_unexpected_error_subclass_no_debug(self):
self.config_fixture.config(debug=False)
e = UnexpectedExceptionTestCase.SubClassExc(
debug_info=self.exc_str)
self.assertEqual(exception.UnexpectedError.message_format,
six.text_type(e))
def test_unexpected_error_subclass_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
subclass = self.SubClassExc
e = subclass(debug_info=self.exc_str)
expected = subclass.debug_message_format % {'debug_info': self.exc_str}
self.assertEqual(
'%s %s' % (expected, exception.SecurityError.amendment),
six.text_type(e))
def test_unexpected_error_custom_message_no_debug(self):
self.config_fixture.config(debug=False)
e = exception.UnexpectedError(self.exc_str)
self.assertEqual(exception.UnexpectedError.message_format,
six.text_type(e))
def test_unexpected_error_custom_message_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
e = exception.UnexpectedError(self.exc_str)
self.assertEqual(
'%s %s' % (self.exc_str, exception.SecurityError.amendment),
six.text_type(e))
def test_unexpected_error_custom_message_exception_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
orig_e = exception.NotFound(target=uuid.uuid4().hex)
e = exception.UnexpectedError(orig_e)
self.assertEqual(
'%s %s' % (six.text_type(orig_e),
exception.SecurityError.amendment),
six.text_type(e))
def test_unexpected_error_custom_message_binary_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
binary_msg = b'something'
e = exception.UnexpectedError(binary_msg)
self.assertEqual(
'%s %s' % (six.text_type(binary_msg),
exception.SecurityError.amendment),
six.text_type(e))
class SecurityErrorTestCase(ExceptionTestCase):
"""Test whether security-related info is exposed to the API user."""
def setUp(self):
super(SecurityErrorTestCase, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
def test_unauthorized_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
e = exception.Unauthorized(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
def test_unauthorized_exposure_in_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
e = exception.Unauthorized(message=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_forbidden_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
def test_forbidden_exposure_in_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_forbidden_action_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
action = uuid.uuid4().hex
e = exception.ForbiddenAction(message=risky_info, action=action)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
self.assertIn(action, six.text_type(e))
self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
self.assertIn(action, six.text_type(e))
self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
def test_forbidden_action_exposure_in_debug(self):
self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
action = uuid.uuid4().hex
e = exception.ForbiddenAction(message=risky_info, action=action)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
self.assertIn(exception.SecurityError.amendment, six.text_type(e))
e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
self.assertIn(action, six.text_type(e))
self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
def test_forbidden_action_no_message(self):
# When no custom message is given when the ForbiddenAction (or other
# SecurityError subclass) is created the exposed message is the same
# whether debug is enabled or not.
action = uuid.uuid4().hex
self.config_fixture.config(debug=False)
e = exception.ForbiddenAction(action=action)
exposed_message = six.text_type(e)
self.assertIn(action, exposed_message)
self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
self.config_fixture.config(debug=True)
e = exception.ForbiddenAction(action=action)
self.assertEqual(exposed_message, six.text_type(e))
def test_unicode_argument_message(self):
self.config_fixture.config(debug=False)
risky_info = u'\u7ee7\u7eed\u884c\u7f29\u8fdb\u6216'
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
|
|
# This program lets up to three players play Travel Bingo! Each player is assigned
# a grid-card with random items commonly seen from a car, and the first player
# to reach the chosen bingo goal will win. Players can keep playing until they choose to stop.
import random
def loadItems() :
'''Read file of potential items and store as a list of strings. Return list of strings.'''
stringList = []
with open("C:/Python27/Scripts/travelItemList.txt", "r") as in_file :
for line in in_file :
line = line.strip()
stringList.append(line)
return stringList
def getNum(prompt, low, high ):
'''Asks user for number input and checks validity based on upper and lower limits.
Will loop until the user provides valid input.
Parameters: prompt = text to ask user for input; low = lower limit; high = upper limit.'''
checkFlag = False
while not checkFlag :
num = int(raw_input(prompt))
if num < low :
print "You need to provide a number higher than {}. Please try again.".format(low)
elif num > high :
print "You need to provide a number lower than {}. Please try again.".format(high)
else :
checkFlag = True
return num
def getGrid() :
'''Ask user for grid size and return list of column and row length'''
grid = []
rows = getNum("Number of rows: ", 0, 54)
columns = getNum("Number of columns: ", 0, (54 - rows))
grid.append(rows)
grid.append(columns)
return grid
def getGoal(grid) :
'''Asks user for goal, prints description of goal, and returns a number signifying that goal.
Parameter: grid = a list of two numbers, # of rows and columns.
This is only needed to validate user's selection of Goal 3 (Diagonal), where the grid must be n x n.'''
goal = getNum("Please select your goal for this round!\n1\tFull Card\n2\tSingle Line\n3\tDiagonal\n4\tFour Corners\n", 1, 4)
# Check if the goal is valid.
checkFlag = False
while not checkFlag :
if goal == 3 and grid[0] != grid[1] :
print "You can't use Diagonal as your goal because your grid is not n X n."
goal = getNum("Please select your goal for this round!\n1\tFull Card\n2\tSingle Line\n3\tDiagonal\n4\tFour Corners\n", 1, 4)
else :
checkFlag = True
# Print goal for user.
if goal == 1 :
print "\nYour goal is Full Card - all items must be found."
elif goal == 2 :
print "\nYour goal is Single Line - all items in a horizontal or vertical line must be found."
elif goal == 3 :
print "\nYour goal is Diagonal - all items in a diagonal line must be found."
else :
print "\nYour goal is Four Corners - the items in each of the four corners must be found."
return goal
def getName(num) :
'''Ask user for player name and return name as a string
Parameter: num = total number of players, used to identify which player is being named'''
name = raw_input("What is Player {}'s name? ".format(num))
return name
def makeCard(grid, itemList) :
    '''Create a card for a player and return list of card items
Parameters: grid = list of row and column lengths; itemList = list of all available items to place in card'''
# For this function, column and row totals need to be at least 1 for the loop
# to fill card with items (even if user may have selected 0 rows or columns for their grid)
if grid[0] == 0 :
grid[0] = 1
elif grid[1] == 0 :
        grid[1] = 1
    # create a list of random index numbers (duplicates are possible) used to select items from itemList
numList = []
for i in range(0, grid[0] * grid[1]) :
numList.append(random.choice(range(0, 54)))
card = []
k = 0 # to iterate through numList
for i in range(0, grid[0]) : # outer loop determines row
card.append([])
for j in range(0, grid[1]) : # inner loop populates each column within row
card[i].append(j)
card[i][j] = itemList[numList[k]]
k = k + 1
return card
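# For example, makeCard([2, 2], itemList) returns a 2 x 2 list of lists such as
# [['stop sign', 'cow'], ['bridge', 'gas station']], where the item names shown
# here are hypothetical and the real values come from travelItemList.txt.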
def printCard(name, card) :
'''Print player card to screen with a tab between each item in a row and a break between each row
Parameters: name = string of player's name; card = list of lists (that is, rows -> columns) containing player's items'''
print "{}'s card:\n".format(name)
for i in range(0, len(card)) :
for j in range(0, len(card[i])) :
print "{}\t".format(card[i][j]),
print "\n"
def callItem(itemList) :
'''Call new item and then remove that item from the item list so it cannot be called again.
Returns the called item so that each player card can then be checked to see if it contains
that item.
Parameters: itemList = list of all available items to place in card'''
checkFlag = False
while checkFlag == False :
        response = raw_input("Type 'y' to call a new item!")
        if response == "y" :
checkFlag = True
num = random.randint(0, len(itemList) - 1) # random.randint generates a <= N <= b, so it requires
# len(itemList) - 1 to ensure it doesn't generate something too high
item = itemList.pop(num)
print "The new call item is {}!".format(item)
return item
def checkCard(item, card) :
'''Check if a card contains given item, and if so, replace that card item with "FOUND!". Returns updated card.
Parameters: item = string containing the called item; card = list of lists (i.e. rows -> columns) containing player's items'''
for i in range(0, len(card)) :
for j in range(0, len(card[i])) :
if card[i][j] == item :
card[i][j] = "FOUND!"
return card
def checkWinner(goal, card) :
'''Check if the card meets established goal condition. Returns Boolean signifying if player has won or not.
Parameters: goal = number signifying goal type, must be 1-4; card = list of lists (i.e. rows -> columns) containing player's items'''
winner = True
# Entire card
if goal == 1 :
for i in range(0, len(card)) :
for j in range(0, len(card[i])) :
if card[i][j] != "FOUND!" :
winner = False
# Single vertical or horizontal line
elif goal == 2 :
# check horizontal lines first
for i in range(0, len(card)) :
winner = True
for j in range(0, len(card[i])) :
if card[i][j] != "FOUND!" :
winner = False
if winner == True :
return winner
# then check vertical lines
        for j in range(len(card[0])) :
            winner = True
            for i in range(len(card)) :
                if card[i][j] != "FOUND!" :
                    winner = False
            if winner == True :
                return winner
# Diagonal line
elif goal == 3 :
for i in range(len(card)) : # check top left to bottom right diagonal
if card[i][i] != "FOUND!" :
winner = False
        if winner == True :
            return winner
        winner = True # reset before checking the other diagonal
        for i in range(1, len(card) + 1) : # check top right to bottom left diagonal
            if card[i - 1][-i] != "FOUND!" :
                winner = False
# Four corners
elif goal == 4 :
if card[0][0] != "FOUND!" or card[0][-1] != "FOUND!" or card[-1][0] != "FOUND!" or card[-1][-1] != "FOUND!" :
winner = False
return winner
def declareWinners(winners, playerNames) :
''' Print winners.
Parameters: winners = list of True/False indicating winners; playerNames = list of player names.'''
for i in range(0, len(winners)) :
if winners[i] == True :
print "{} is a winner! Hooray!".format(playerNames[i])
def playAgain() :
''' Ask user if they would like to play again. Return Boolean.'''
playAgain = ""
keepPlaying = True
while playAgain != "y" and playAgain != "n" :
playAgain = raw_input("Would you like to play again? y/n")
if playAgain == "n" :
keepPlaying = False
print "Have a good trip!"
return keepPlaying
def main() :
''' main() drives the entire game. Execution begins here.'''
originalItems = loadItems()
# Loop for continuing play if desired
keepPlaying = True
while keepPlaying == True :
callerItems = originalItems[:] # create duplicate item list to preserve originals
# Game set-up: # of players, grid size, goal
numPlayers = getNum("Number of players (between 1-3): ", 1, 3)
grid = getGrid()
goal = getGoal(grid)
# Set player names, cards, win status
playerNames = []
playerCards = []
winners = []
for i in range(0, numPlayers) :
playerNames.append(getName(i + 1))
playerCards.append(makeCard(grid, callerItems))
printCard(playerNames[i], playerCards[i])
winners.append(False)
# Play until someone wins
while True not in winners :
item = callItem(callerItems)
for i in range(0, len(playerCards)) :
playerCards[i] = checkCard(item, playerCards[i])
printCard(playerNames[i], playerCards[i])
winners[i] = checkWinner(goal, playerCards[i])
declareWinners(winners, playerNames)
keepPlaying = playAgain()
main()
|
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# performance scenario configuration for various languages
import math
WARMUP_SECONDS=5
JAVA_WARMUP_SECONDS=15 # Java needs more warmup time for JIT to kick in.
BENCHMARK_SECONDS=30
SMOKETEST='smoketest'
SCALABLE='scalable'
SWEEP='sweep'
DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST]
SECURE_SECARGS = {'use_test_ca': True,
'server_host_override': 'foo.test.google.fr'}
HISTOGRAM_PARAMS = {
'resolution': 0.01,
'max_possible': 60e9,
}
# target number of RPCs outstanding across all client channels in
# non-ping-pong tests (since we can only specify per-channel numbers, the
# actual target will be slightly higher)
OUTSTANDING_REQUESTS={
'async': 6400,
'sync': 1000
}
# wide is the number of client channels in multi-channel tests (1 otherwise)
WIDE=64
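# For example, with the defaults above an unconstrained 'async' scenario targets
# OUTSTANDING_REQUESTS['async'] = 6400 outstanding RPCs spread over WIDE = 64
# channels, which _ping_pong_scenario below turns into
# ceil(6400 / 64) = 100 outstanding RPCs per channel.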
def _get_secargs(is_secure):
if is_secure:
return SECURE_SECARGS
else:
return None
def remove_nonproto_fields(scenario):
"""Remove special-purpose that contains some extra info about the scenario
but don't belong to the ScenarioConfig protobuf message"""
scenario.pop('CATEGORIES', None)
scenario.pop('CLIENT_LANGUAGE', None)
scenario.pop('SERVER_LANGUAGE', None)
scenario.pop('EXCLUDED_POLL_ENGINES', None)
return scenario
def geometric_progression(start, stop, step):
n = start
while n < stop:
yield int(round(n))
n *= step
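# For example, list(geometric_progression(1, 1000, 8)) yields [1, 8, 64, 512]:
# each value is the rounded running product, and iteration stops once the
# running value reaches stop.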
def _payload_type(use_generic_payload, req_size, resp_size):
r = {}
sizes = {
'req_size': req_size,
'resp_size': resp_size,
}
if use_generic_payload:
r['bytebuf_params'] = sizes
else:
r['simple_params'] = sizes
return r
def _ping_pong_scenario(name, rpc_type,
client_type, server_type,
secure=True,
use_generic_payload=False,
req_size=0,
resp_size=0,
unconstrained_client=None,
client_language=None,
server_language=None,
async_server_threads=0,
warmup_seconds=WARMUP_SECONDS,
categories=DEFAULT_CATEGORIES,
channels=None,
outstanding=None,
resource_quota_size=None,
excluded_poll_engines=[]):
"""Creates a basic ping pong scenario."""
scenario = {
'name': name,
'num_servers': 1,
'num_clients': 1,
'client_config': {
'client_type': client_type,
'security_params': _get_secargs(secure),
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'rpc_type': rpc_type,
'load_params': {
'closed_loop': {}
},
'histogram_params': HISTOGRAM_PARAMS,
},
'server_config': {
'server_type': server_type,
'security_params': _get_secargs(secure),
'async_server_threads': async_server_threads,
},
'warmup_seconds': warmup_seconds,
'benchmark_seconds': BENCHMARK_SECONDS
}
if resource_quota_size:
scenario['server_config']['resource_quota_size'] = resource_quota_size
if use_generic_payload:
if server_type != 'ASYNC_GENERIC_SERVER':
raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
scenario['server_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
if unconstrained_client:
outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
# clamp buffer usage to something reasonable (16 gig for now)
MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size, resp_size))
wide = channels if channels is not None else WIDE
deep = int(math.ceil(1.0 * outstanding_calls / wide))
    scenario['num_clients'] = 0 # use as many clients as available.
scenario['client_config']['outstanding_rpcs_per_channel'] = deep
scenario['client_config']['client_channels'] = wide
scenario['client_config']['async_client_threads'] = 0
else:
scenario['client_config']['outstanding_rpcs_per_channel'] = 1
scenario['client_config']['client_channels'] = 1
scenario['client_config']['async_client_threads'] = 1
if client_language:
# the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
scenario['CLIENT_LANGUAGE'] = client_language
if server_language:
# the SERVER_LANGUAGE field is recognized by run_performance_tests.py
scenario['SERVER_LANGUAGE'] = server_language
if categories:
scenario['CATEGORIES'] = categories
if len(excluded_poll_engines):
# The polling engines for which this scenario is excluded
scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
return scenario
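# A minimal usage sketch (the scenario name below is just an illustration):
#   scenario = _ping_pong_scenario('demo_protobuf_sync_unary_ping_pong',
#                                  rpc_type='UNARY',
#                                  client_type='SYNC_CLIENT',
#                                  server_type='SYNC_SERVER')
# returns a dict describing one client and one server, a single channel with a
# single outstanding RPC, TLS enabled via SECURE_SECARGS, and the default
# warmup and benchmark durations.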
class CXXLanguage:
def __init__(self):
self.safename = 'cxx'
def worker_cmdline(self):
return ['bins/opt/qps_worker']
def worker_port_offset(self):
return 0
def scenarios(self):
# TODO(ctiller): add 70% load latency test
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
yield _ping_pong_scenario(
'cpp_generic_async_streaming_ping_pong_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True, async_server_threads=1,
secure=secure,
categories=smoketest_categories)
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
secure=secure,
categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
async_server_threads=1,
secure=secure)
yield _ping_pong_scenario(
'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' %
(secstr),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=smoketest_categories + [SCALABLE],
excluded_poll_engines = ['poll-cv'])
yield _ping_pong_scenario(
'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=smoketest_categories+[SCALABLE],
excluded_poll_engines = ['poll-cv'])
for rpc_type in ['unary', 'streaming']:
for synchronicity in ['sync', 'async']:
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
async_server_threads=1,
secure=secure)
for size in geometric_progression(1, 1024*1024*1024+1, 8):
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' % (synchronicity, rpc_type, secstr, size),
rpc_type=rpc_type.upper(),
req_size=size,
resp_size=size,
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
categories=[SWEEP])
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
categories=smoketest_categories+[SCALABLE],
resource_quota_size=500*1024)
for channels in geometric_progression(1, 20000, math.sqrt(10)):
for outstanding in geometric_progression(1, 200000, math.sqrt(10)):
if synchronicity == 'sync' and outstanding > 1200: continue
if outstanding < channels: continue
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity, secure=secure,
categories=[SWEEP], channels=channels, outstanding=outstanding)
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_csharp.sh']
def worker_port_offset(self):
return 100
def scenarios(self):
yield _ping_pong_scenario(
'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SMOKETEST,SCALABLE])
yield _ping_pong_scenario(
'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SCALABLE])
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
server_language='c++', async_server_threads=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
server_language='c++', async_server_threads=1)
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async', server_language='c++',
categories=[SCALABLE])
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='sync', server_language='c++',
categories=[SCALABLE])
yield _ping_pong_scenario(
'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async', client_language='c++',
categories=[SCALABLE])
def __str__(self):
return 'csharp'
class NodeLanguage:
def __init__(self):
    self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_node.sh',
'--benchmark_impl=grpc']
def worker_port_offset(self):
return 200
def scenarios(self):
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
# 'node_generic_async_streaming_ping_pong', rpc_type='STREAMING',
# client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
# use_generic_payload=True)
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
# 'node_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
# client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'node_protobuf_unary_ping_pong', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
categories=[SCALABLE, SMOKETEST])
yield _ping_pong_scenario(
'cpp_to_node_unary_ping_pong', rpc_type='UNARY',
        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
client_language='c++')
yield _ping_pong_scenario(
'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SCALABLE, SMOKETEST])
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
# 'node_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
# client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
# unconstrained_client='async')
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
# 'node_to_cpp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
# client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
# server_language='c++', async_server_threads=1)
# TODO(jtattermusch): make this scenario work
#yield _ping_pong_scenario(
# 'node_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
# client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
# server_language='c++', async_server_threads=1)
def __str__(self):
return 'node'
class PythonLanguage:
def __init__(self):
self.safename = 'python'
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_python.sh']
def worker_port_offset(self):
return 500
def scenarios(self):
yield _ping_pong_scenario(
'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
server_language='c++', async_server_threads=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
server_language='c++', async_server_threads=1)
def __str__(self):
return 'python'
class RubyLanguage:
def __init__(self):
    self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_ruby.sh']
def worker_port_offset(self):
return 300
def scenarios(self):
yield _ping_pong_scenario(
'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'ruby_protobuf_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
server_language='c++', async_server_threads=1)
yield _ping_pong_scenario(
'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
server_language='c++', async_server_threads=1)
def __str__(self):
return 'ruby'
class JavaLanguage:
def __init__(self):
    self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_java.sh']
def worker_port_offset(self):
return 400
def scenarios(self):
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
yield _ping_pong_scenario(
'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True, async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories)
yield _ping_pong_scenario(
'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
yield _ping_pong_scenario(
'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories)
yield _ping_pong_scenario(
'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
yield _ping_pong_scenario(
'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
categories=[SCALABLE])
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
categories=[SCALABLE])
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
# TODO(jtattermusch): add scenarios java vs C++
def __str__(self):
return 'java'
class GoLanguage:
def __init__(self):
    self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_go.sh']
def worker_port_offset(self):
return 600
def scenarios(self):
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
      # but that's mostly because there is no better name for that enum value.
yield _ping_pong_scenario(
'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True, async_server_threads=1,
secure=secure,
categories=smoketest_categories)
yield _ping_pong_scenario(
'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure)
yield _ping_pong_scenario(
'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure,
categories=smoketest_categories)
# unconstrained_client='async' is intended (client uses goroutines)
yield _ping_pong_scenario(
'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=smoketest_categories+[SCALABLE])
# unconstrained_client='async' is intended (client uses goroutines)
yield _ping_pong_scenario(
'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=[SCALABLE])
# unconstrained_client='async' is intended (client uses goroutines)
      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
      # but that's mostly because there is no better name for that enum value.
yield _ping_pong_scenario(
'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
secure=secure,
categories=[SCALABLE])
# TODO(jtattermusch): add scenarios go vs C++
def __str__(self):
return 'go'
class NodeExpressLanguage:
def __init__(self):
    self.safename = str(self)
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_node.sh',
'--benchmark_impl=express']
def worker_port_offset(self):
return 700
def scenarios(self):
yield _ping_pong_scenario(
'node_express_json_unary_ping_pong', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
categories=[SCALABLE, SMOKETEST])
yield _ping_pong_scenario(
'node_express_json_async_unary_qps_unconstrained', rpc_type='UNARY',
client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SCALABLE, SMOKETEST])
def __str__(self):
return 'node_express'
LANGUAGES = {
'c++' : CXXLanguage(),
'csharp' : CSharpLanguage(),
'node' : NodeLanguage(),
'node_express': NodeExpressLanguage(),
'ruby' : RubyLanguage(),
'java' : JavaLanguage(),
'python' : PythonLanguage(),
'go' : GoLanguage(),
}
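# A driver script (for example run_performance_tests.py, which recognizes the
# CLIENT_LANGUAGE/SERVER_LANGUAGE fields noted above) can enumerate the
# configured scenarios along these lines:
#   for name, language in LANGUAGES.items():
#       scenarios = [remove_nonproto_fields(s) for s in language.scenarios()]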
|
|
import copy
import bson
import datetime
import pymongo.errors
from . import consistencychecker
from . import containerutil
from .. import config
from .. import util
from ..web.errors import APIStorageException, APIConflictException, APINotFoundException
log = config.log
# TODO: Find a better place to put this until OOP where we can just call cont.children
CHILD_MAP = {
'groups': 'projects',
'projects': 'sessions',
'sessions': 'acquisitions'
}
PARENT_MAP = {v: k for k,v in CHILD_MAP.iteritems()}
# All "containers" are required to return these fields
# 'All' includes users
BASE_DEFAULTS = {
'_id': None,
'created': None,
'modified': None
}
# All containers that inherit from 'container' in the DM
CONTAINER_DEFAULTS = BASE_DEFAULTS.copy()
CONTAINER_DEFAULTS.update({
'permissions': [],
'files': [],
'notes': [],
'tags': [],
'info': {}
})
class ContainerStorage(object):
"""
This class provides access to mongodb collection elements (called containers).
    It is used by ContainerHandler instances for get, create, update and delete operations on containers.
Examples: projects, sessions, acquisitions and collections
"""
def __init__(self, cont_name, use_object_id=False, use_delete_tag=False, parent_cont_name=None, child_cont_name=None):
self.cont_name = cont_name
self.parent_cont_name = parent_cont_name
self.child_cont_name = child_cont_name
self.use_object_id = use_object_id
self.use_delete_tag = use_delete_tag
self.dbc = config.db[cont_name]
@classmethod
def factory(cls, cont_name):
"""
Factory method to aid in the creation of a ContainerStorage instance
when cont_name is dynamic.
"""
cont_storage_name = containerutil.singularize(cont_name).capitalize() + 'Storage'
for subclass in cls.__subclasses__():
if subclass.__name__ == cont_storage_name:
return subclass()
return cls(containerutil.pluralize(cont_name))
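    # For example, ContainerStorage.factory('sessions') returns an instance of a
    # subclass named SessionStorage if such a subclass is defined elsewhere, and
    # otherwise falls back to a generic ContainerStorage('sessions').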
@classmethod
def get_top_down_hierarchy(cls, cont_name, cid):
parent_to_child = {
'groups': 'projects',
'projects': 'sessions',
'sessions': 'acquisitions'
}
parent_tree = {
cont_name: [cid]
}
parent_name = cont_name
while parent_to_child.get(parent_name):
# Parent storage
storage = ContainerStorage.factory(parent_name)
child_name = parent_to_child[parent_name]
parent_tree[child_name] = []
# For each parent id, find all of its children and add them to the list of child ids in the parent tree
for parent_id in parent_tree[parent_name]:
parent_tree[child_name] = parent_tree[child_name] + [cont["_id"] for cont in storage.get_children_legacy(parent_id, projection={'_id':1})]
parent_name = child_name
return parent_tree
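    # For example, get_top_down_hierarchy('projects', project_id) returns a dict
    # shaped roughly like:
    #   {'projects':     [project_id],
    #    'sessions':     [<ids of sessions in that project>],
    #    'acquisitions': [<ids of acquisitions in those sessions>]}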
def _fill_default_values(self, cont):
if cont:
defaults = BASE_DEFAULTS.copy()
if self.cont_name not in ['groups', 'users']:
defaults = CONTAINER_DEFAULTS.copy()
for k,v in defaults.iteritems():
cont.setdefault(k, v)
def get_container(self, _id, projection=None, get_children=False):
cont = self.get_el(_id, projection=projection)
if cont is None:
raise APINotFoundException('Could not find {} {}'.format(self.cont_name, _id))
if get_children:
children = self.get_children(_id, projection=projection)
cont[containerutil.pluralize(self.child_cont_name)] = children
return cont
def get_children_legacy(self, _id, projection=None, uid=None):
"""
A get_children method that returns sessions from the project level rather than subjects.
        Will be removed when Subject completes its transition to a standalone collection.
"""
try:
child_name = CHILD_MAP[self.cont_name]
except KeyError:
raise APIStorageException('Children cannot be listed from the {0} level'.format(self.cont_name))
if not self.use_object_id:
query = {containerutil.singularize(self.cont_name): _id}
else:
query = {containerutil.singularize(self.cont_name): bson.ObjectId(_id)}
if uid:
query['permissions'] = {'$elemMatch': {'_id': uid}}
if not projection:
projection = {'info': 0, 'files.info': 0, 'subject': 0, 'tags': 0}
return ContainerStorage.factory(child_name).get_all_el(query, None, projection)
def get_children(self, _id, query=None, projection=None, uid=None):
child_name = self.child_cont_name
if not child_name:
raise APIStorageException('Children cannot be listed from the {0} level'.format(self.cont_name))
if not query:
query = {}
if not self.use_object_id:
query[containerutil.singularize(self.cont_name)] = _id
else:
query[containerutil.singularize(self.cont_name)] = bson.ObjectId(_id)
if uid:
query['permissions'] = {'$elemMatch': {'_id': uid}}
if not projection:
projection = {'info': 0, 'files.info': 0, 'subject': 0, 'tags': 0}
return ContainerStorage.factory(child_name).get_all_el(query, None, projection)
def get_parent_tree(self, _id, cont=None, projection=None, add_self=False):
parents = []
curr_storage = self
if not cont:
cont = self.get_container(_id, projection=projection)
if add_self:
# Add the referenced container to the list
cont['cont_type'] = self.cont_name
parents.append(cont)
# Walk up the hierarchy until we cannot go any further
while True:
try:
parent = curr_storage.get_parent(cont['_id'], cont=cont, projection=projection)
except (APINotFoundException, APIStorageException):
# We got as far as we could, either we reached the top of the hierarchy or we hit a dead end with a missing parent
break
curr_storage = ContainerStorage.factory(curr_storage.parent_cont_name)
parent['cont_type'] = curr_storage.cont_name
parents.append(parent)
if curr_storage.parent_cont_name:
cont = parent
else:
break
return parents
def get_parent(self, _id, cont=None, projection=None):
if not cont:
cont = self.get_container(_id, projection=projection)
if self.parent_cont_name:
ps = ContainerStorage.factory(self.parent_cont_name)
parent = ps.get_container(cont[self.parent_cont_name], projection=projection)
return parent
else:
raise APIStorageException('The container level {} has no parent.'.format(self.cont_name))
def _from_mongo(self, cont):
pass
def _to_mongo(self, payload):
pass
def exec_op(self, action, _id=None, payload=None, query=None, user=None,
public=False, projection=None, recursive=False, r_payload=None, # pylint: disable=unused-argument
replace_metadata=False, unset_payload=None):
"""
Generic method to exec a CRUD operation from a REST verb.
"""
check = consistencychecker.get_container_storage_checker(action, self.cont_name)
data_op = payload or {'_id': _id}
check(data_op)
if action == 'GET' and _id:
return self.get_el(_id, projection=projection, fill_defaults=True)
if action == 'GET':
return self.get_all_el(query, user, projection, fill_defaults=True)
if action == 'DELETE':
return self.delete_el(_id)
if action == 'PUT':
return self.update_el(_id, payload, unset_payload=unset_payload, recursive=recursive, r_payload=r_payload, replace_metadata=replace_metadata)
if action == 'POST':
return self.create_el(payload)
raise ValueError('action should be one of GET, POST, PUT, DELETE')
def create_el(self, payload):
self._to_mongo(payload)
try:
result = self.dbc.insert_one(payload)
except pymongo.errors.DuplicateKeyError:
raise APIConflictException('Object with id {} already exists.'.format(payload['_id']))
return result
def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload=None, replace_metadata=False):
replace = None
if replace_metadata:
replace = {}
if payload.get('info') is not None:
replace['info'] = util.mongo_sanitize_fields(payload.pop('info'))
if payload.get('subject') is not None and payload['subject'].get('info') is not None:
replace['subject.info'] = util.mongo_sanitize_fields(payload['subject'].pop('info'))
update = {}
if payload is not None:
self._to_mongo(payload)
update['$set'] = util.mongo_dict(payload)
if unset_payload is not None:
update['$unset'] = util.mongo_dict(unset_payload)
if replace is not None:
update['$set'].update(replace)
if self.use_object_id:
try:
_id = bson.ObjectId(_id)
except bson.errors.InvalidId as e:
raise APIStorageException(e.message)
if recursive and r_payload is not None:
containerutil.propagate_changes(self.cont_name, _id, {}, {'$set': util.mongo_dict(r_payload)})
return self.dbc.update_one({'_id': _id}, update)
def delete_el(self, _id):
if self.use_object_id:
try:
_id = bson.ObjectId(_id)
except bson.errors.InvalidId as e:
raise APIStorageException(e.message)
if self.use_delete_tag:
return self.dbc.update_one({'_id': _id}, {'$set': {'deleted': datetime.datetime.utcnow()}})
return self.dbc.delete_one({'_id':_id})
def get_el(self, _id, projection=None, fill_defaults=False):
if self.use_object_id:
try:
_id = bson.ObjectId(_id)
except bson.errors.InvalidId as e:
raise APIStorageException(e.message)
cont = self.dbc.find_one({'_id': _id, 'deleted': {'$exists': False}}, projection)
self._from_mongo(cont)
if fill_defaults:
self._fill_default_values(cont)
if cont is not None and cont.get('files', []):
cont['files'] = [f for f in cont['files'] if 'deleted' not in f]
return cont
def get_all_el(self, query, user, projection, fill_defaults=False):
if query is None:
query = {}
if user:
if query.get('permissions'):
query['$and'] = [{'permissions': {'$elemMatch': user}}, {'permissions': query.pop('permissions')}]
else:
query['permissions'] = {'$elemMatch': user}
query['deleted'] = {'$exists': False}
        # if the projection includes info, files.info or subject.info, replace the raw info with a boolean info_exists key and only allow reserved info keys through
if projection and ('info' in projection or 'files.info' in projection or 'subject.info' in projection):
projection = copy.deepcopy(projection)
replace_info_with_bool = True
projection.pop('subject.info', None)
projection.pop('files.info', None)
projection.pop('info', None)
# Replace with None if empty (empty projections only return ids)
if not projection:
projection = None
else:
replace_info_with_bool = False
results = list(self.dbc.find(query, projection))
for cont in results:
if cont.get('files', []):
cont['files'] = [f for f in cont['files'] if 'deleted' not in f]
self._from_mongo(cont)
if fill_defaults:
self._fill_default_values(cont)
if replace_info_with_bool:
info = cont.pop('info', {})
cont['info_exists'] = bool(info)
cont['info'] = containerutil.sanitize_info(info)
if cont.get('subject'):
s_info = cont['subject'].pop('info', {})
cont['subject']['info_exists'] = bool(s_info)
cont['subject']['info'] = containerutil.sanitize_info(s_info)
for f in cont.get('files', []):
f_info = f.pop('info', {})
f['info_exists'] = bool(f_info)
f['info'] = containerutil.sanitize_info(f_info)
return results
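    # For example, get_all_el(query, user, {'info': 1}) drops the raw 'info'
    # projection (leaving a full-document query) and each returned container
    # then carries a boolean 'info_exists' plus a sanitized 'info' dict instead
    # of the complete info payload.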
def modify_info(self, _id, payload, modify_subject=False):
# Support modification of subject info
# Can be removed when subject becomes a standalone container
info_key = 'subject.info' if modify_subject else 'info'
update = {}
set_payload = payload.get('set')
delete_payload = payload.get('delete')
replace_payload = payload.get('replace')
if (set_payload or delete_payload) and replace_payload is not None:
raise APIStorageException('Cannot set or delete AND replace info fields.')
if replace_payload is not None:
update = {
'$set': {
info_key: util.mongo_sanitize_fields(replace_payload)
}
}
else:
if set_payload:
update['$set'] = {}
for k,v in set_payload.items():
update['$set'][info_key + '.' + k] = util.mongo_sanitize_fields(v)
if delete_payload:
update['$unset'] = {}
for k in delete_payload:
update['$unset'][info_key + '.' + k] = ''
if self.use_object_id:
_id = bson.objectid.ObjectId(_id)
query = {'_id': _id }
if not update.get('$set'):
update['$set'] = {'modified': datetime.datetime.utcnow()}
else:
update['$set']['modified'] = datetime.datetime.utcnow()
return self.dbc.update_one(query, update)
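    # A minimal sketch of the resulting Mongo update, assuming
    # util.mongo_sanitize_fields passes simple scalar values through unchanged:
    #   modify_info(_id, {'set': {'age': 42}, 'delete': ['weight']})
    # issues an update roughly equivalent to
    #   {'$set': {'info.age': 42, 'modified': <utcnow>},
    #    '$unset': {'info.weight': ''}}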
|
|
#
# @file tools/sdk/python/MotionSDK.py
# @author Luke Tokheim, [email protected]
# @version 2.2
#
# Copyright (c) 2015, Motion Workshop
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import select
import socket
import struct
import sys
#
# Only load functools in Python version 2.5 or newer.
#
if sys.version_info >= (2, 5):
import functools
class Client:
"""
Implements socket connection and basic binary message protocol for
client application access to all Motion Service streams. Use the static
Format methods to convert a binary message into the associated object.
"""
def __init__(self, host, port):
"""
Create client socket connection to the Motion Service data stream
on host:port.
"""
self.__socket = None
self.__recv_flags = 0
self.__send_flags = 0
self.__description = None
self.__time_out_second = None
self.__time_out_second_send = None
# Set the default host name to the local host.
if (None == host) or (0 == len(host)):
host = "127.0.0.1"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
self.__socket = s
# Set the MSG_WAITALL flag if it exists for this platform
try:
self.__recv_flags |= socket.MSG_WAITALL
except AttributeError:
pass
# Read the first message from the service. It is a
# string description of the remote service.
self.__description = self.__receive()
def __del__(self):
"""
Destructor. Close the socket connection.
"""
self.close()
def close(self):
"""
Close the socket connection if it exists.
"""
if None != self.__socket:
self.__socket.shutdown(2)
self.__socket.close()
self.__socket = None
def isConnected(self):
"""
Return true if the current connection is active.
"""
if None != self.__socket:
return True
else:
return False
def waitForData(self, time_out_second=None):
"""
        Wait until there is incoming data on this client connection.
        Returns True if data arrived before the time out, False otherwise.
"""
# Default time out is 5 seconds.
if None == time_out_second:
time_out_second = 5
if time_out_second != self.__time_out_second:
self.__socket.settimeout(time_out_second)
self.__time_out_second = self.__socket.gettimeout()
data = self.__receive()
if None != data:
return True
else:
return False
def readData(self, time_out_second=None):
"""
Read a single sample of data from the open connection.
Returns a single sample of data, or None if the incoming
data is invalid.
"""
if None == self.__socket:
return None
# Default time out is 1 second.
if None == time_out_second:
time_out_second = 1
if time_out_second != self.__time_out_second:
self.__socket.settimeout(time_out_second)
self.__time_out_second = self.__socket.gettimeout()
return self.__receive()
def writeData(self, data, time_out_second=None):
"""
Write a single sample of data to the open connection.
Returns True iff the message was successfully written
to the socket. Otherwise returns False.
"""
if None == self.__socket:
return False
if len(data) <= 0:
return False
# Default time out is 1 second.
if None == time_out_second:
time_out_second = 1
if time_out_second != self.__time_out_second_send:
self.__socket.settimeout(time_out_second)
self.__time_out_second_send = self.__socket.gettimeout()
return self.__send(data)
def __receive(self):
"""
Read a single binary message defined by a length header.
"""
if None == self.__socket:
return None
if False == self.__select_receive():
return None
try:
header_size = struct.calcsize("!I")
# Single integer network order (=big-endian) message length header.
header = self.__socket.recv(header_size, self.__recv_flags)
if header_size != len(header):
return None
# Parse the length field, read the raw data field.
length = struct.unpack("!I", header)[0]
data = self.__socket.recv(length, self.__recv_flags)
if length != len(data):
return None
return data
except socket.timeout:
pass
return None
def __send(self, data):
"""
Write a single binary message defined by a length header.
Returns true iff the message was successfully written
to the socket.
"""
if None == self.__socket:
return False
if False == self.__select_send():
            return False
try:
# Convert Python 3 strings to byte string.
if not isinstance(data, bytes):
data = data.encode("utf-8")
length = len(data)
message = struct.pack("!I" + str(length) + "s", length, data)
send_result = self.__socket.sendall(message, self.__send_flags)
if None == send_result:
return True
except socket.timeout:
pass
return False
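    # The wire format used by __receive and __send is a 4-byte big-endian length
    # header followed by the raw payload bytes. For example, the payload
    # b"hello" is framed as:
    #   struct.pack("!I5s", 5, b"hello") == b"\x00\x00\x00\x05hello"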
def __select_receive(self):
"""
Use the select function to wait until there is data available to read
on the internal socket. Returns True iff there is at least one byte
ready to be read.
"""
fd = self.__socket.fileno()
try:
            read_list, _, _ = select.select(
                [fd], [], [], self.__time_out_second)
            for s in read_list:
if fd == s:
return True
except socket.timeout:
pass
return False
def __select_send(self):
"""
Use the select function to wait until there data can be written to the
internal socket object.
"""
fd = self.__socket.fileno()
try:
            _, write_list, _ = select.select(
                [], [fd], [], self.__time_out_second_send)
            for s in write_list:
if fd == s:
return True
except socket.timeout:
pass
return False
#
# END class Client
#
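#
# A minimal usage sketch for the Client class (host and port are placeholders;
# consult the Motion Service documentation for the actual stream ports):
#
#   client = Client("127.0.0.1", port)
#   if client.waitForData():
#       data = client.readData()
#       # ... pass the raw message to one of the Format classes below ...
#   client.close()
#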
class File:
"""
Implements a file input stream interface for reading Motion Service
binary take data files. Provide a simple interface to develop
external applications that can read Motion take data from disk.
This class only handles the reading of binary data and conversion
to arrays of native data types. The Format class implements
interfaces to the service specific data formats.
"""
def __init__(self, pathname):
"""
Open a Motion take data file for reading.
Set parameter pathname to the file to open as the input stream.
"""
self.__input = None
self.__input = open(pathname, "rb")
def __del__(self):
"""
        Destructor. Close the input file stream.
"""
try:
self.close()
except RuntimeError:
pass
def close(self):
"""
Close the input file stream.
Throws a RuntimeError if the file stream is not open.
"""
if None != self.__input:
self.__input.close()
self.__input = None
else:
raise RuntimeError("failed to close input file stream, not open")
def readData(self, length, real_valued):
"""
Read a single block of binary data from the current position
in the input file stream. Convert the block of data into an
array of length typed elements.
Integer parameter length defines the required number of typed elements.
Set boolean parameter real_valued to True if the typed elements are
real valued, i.e. floats. Set real_valued to false for short integers.
"""
if None == self.__input:
return None
data = None
if length > 0:
# Choose the binary format of the array values,
# "f" == float and "h" == short.
value_format = "f"
if False == real_valued:
value_format = "h"
element_size = length * struct.calcsize("<" + str(value_format))
input_buffer = self.__input.read(element_size)
if element_size == len(input_buffer):
data = struct.unpack(
"<" + str(length) + str(value_format), input_buffer)
else:
self.close()
return data
#
# END class File
#
class Format:
"""
Motion Service streams send a list of data elements. The static Format
methods create a map from integer id to array packed data for each
service specific format.
"""
class Element:
"""
Motion Service streams send a list of data elements. The {@link Format}
functions create a map from integer id to array packed data for each
service specific format.
This is an abstract base class to implement a single format specific
data element. The idea is that a child class implements a format
specific interface (API) to access individual components of an array of
packed data.
For example, the PreviewElement class extends this class
and provides a PreviewElement.getEuler() method to access
an array of {x, y, z} Euler angles.
"""
def __init__(self, data, length, real_valued):
"""
Initialize element data.
"""
self.__data = None
self.__real_valued = None
if (len(data) == length) or (0 == length):
self.__data = data
self.__real_valued = real_valued
else:
raise RuntimeError("invalid input data for format element")
def getData(self, base, length):
"""
Utility function to copy portions of the packed data array into its
component elements.
            Parameter base defines the starting index to copy data from the
            internal data array.
            Parameter length defines the number of data values in this
            component element.
            Returns an array of length values, taken from
            [data[base] ... data[base + length - 1]] if there are valid values
            available, or zeros otherwise.
            """
if (None != self.__data) and (base + length <= len(self.__data)):
return self.__data[base:(base + length)]
else:
                value = float(0)
                if False == self.__real_valued:
                    value = int(0)
                result = list()
                for i in range(0, length):
                    result.append(value)
                return result
def access(self):
"""
Direct access to the internal buffer.
"""
return self.__data
#
# END class Element
#
class ConfigurableElement(Element):
"""
The Configurable data services provides access to all data streams in
a single message. The client selects channels and ordering at the
start of the connection. The Configurable service sends a map of
N data elements. Each data element is an array of M single precision
floating point numbers.
"""
def __init__(self, data):
"""
Defines the parameters of this element.
"""
Format.Element.__init__(self, data, 0, True)
def value(self, index):
"""
Get a single channel entry at specified index.
"""
return self.access()[index]
def size(self):
"""
Convenience method. Size accessor.
"""
return len(self.access())
#
# END class ConfigurableElement
#
class PreviewElement(Element):
"""
The Preview service sends a map of N Preview data elements. Use this
class to wrap a single Preview data element such that we can access
individual components through a simple API.
Preview element format:
id => [global quaternion, local quaternion, local euler, acceleration]
id => [
Gqw, Gqx, Gqy, Gqz, Lqw, Lqx, Lqy, Lqz, rx, ry, rz, lax, lay, laz
]
"""
def __init__(self, data):
"""
Defines the parameters of this element.
"""
Format.Element.__init__(self, data, 14, True)
def getEuler(self):
"""
Get a set of x, y, and z Euler angles that define the
current orientation. Specified in radians assuming x-y-z
rotation order. Not necessarily continuous over time, each
angle lies on the domain [-pi, pi].
Euler angles are computed on the server side based on the
current local quaternion orientation.
Returns a three element array [x, y, z] of Euler angles
in radians or None if there is no available data
"""
return self.getData(8, 3)
def getMatrix(self, local):
"""
Get a 4-by-4 rotation matrix from the current global or local
quaternion orientation. Specified as a 16 element array in
row-major order.
            Set parameter local to true to get the local orientation; set
            local to false to get the global orientation.
"""
return Format.quaternion_to_R3_rotation(self.getQuaternion(local))
def getQuaternion(self, local):
"""
Get the global or local unit quaternion that defines the current
orientation.
            @param local set local to true to get the local orientation,
            set local to false to get the global orientation
Returns a four element array [w, x, y, z] that defines
a unit length quaternion q = w + x*i + y*j + z*k or None
if there is no available data
"""
if (local):
return self.getData(4, 4)
else:
return self.getData(0, 4)
def getAccelerate(self):
"""
Get x, y, and z of the current estimate of linear acceleration.
Specified in g.
            Returns a three element array [x, y, z] of linear acceleration
channels specified in g or zeros if there is no available data
"""
return self.getData(11, 3)
#
# END class PreviewElement
#
class SensorElement(Element):
"""
The Sensor service provides access to the current un-filtered sensor
signals in real units. The Sensor service sends a map of N data
elements. Use this class to wrap a single Sensor data element such
that we can access individual components through a simple API.
Sensor element format:
id => [accelerometer, magnetometer, gyroscope]
id => [ax, ay, az, mx, my, mz, gx, gy, gz]
"""
def __init__(self, data):
"""
Initialize this container identifier with a packed data
array in the Sensor format.
Parameter data is a packed array of accelerometer, magnetometer,
and gyroscope un-filtered signal data.
"""
Format.Element.__init__(self, data, 9, True)
def getAccelerometer(self):
"""
Get a set of x, y, and z values of the current un-filtered
accelerometer signal. Specified in g where 1 g =
-9.8 meters/sec^2.
Domain varies with configuration. Maximum is [-6, 6] g.
Returns a three element array [x, y, z] of acceleration
in gs or zeros if there is no available data
"""
return self.getData(0, 3)
def getGyroscope(self):
"""
Get a set of x, y, and z values of the current un-filtered
gyroscope signal. Specified in degrees/second.
Valid domain is [-500, 500] degrees/second.
Returns a three element array [x, y, z] of angular velocity
in degrees/second or zeros if there is no available data.
"""
return self.getData(6, 3)
def getMagnetometer(self):
"""
Get a set of x, y, and z values of the current un-filtered
magnetometer signal. Specified in uT (microtesla).
Domain varies with local magnetic field strength. Expect values
on [-60, 60] uT (microtesla).
Returns a three element array [x, y, z] of magnetic field
strength in uT (microtesla) or zeros if there is no
available data.
"""
return self.getData(3, 3)
#
# class SensorElement
#
class RawElement(Element):
"""
The Raw service provides access to the current uncalibrated,
unprocessed sensor signals in signed integer format. The Raw service
sends a map of N data elements. Use this class to wrap a single Raw
data element such that we can access individual components through a
simple API.
Raw element format:
id => [accelerometer, magnetometer, gyroscope]
id => [ax, ay, az, mx, my, mz, gx, gy, gz]
        All sensors output 12-bit integers, processed as 16-bit short
        integers on the server side.
"""
def __init__(self, data):
"""
Initialize this container identifier with a packed data
array in the Raw format.
Parameter data is a packed array of accelerometer, magnetometer,
and gyroscope un-filtered signal data.
"""
Format.Element.__init__(self, data, 9, False)
def getAccelerometer(self):
"""
Get a set of x, y, and z values of the current unprocessed
accelerometer signal.
Valid domain is [0, 4095].
Returns a three element array [x, y, z] of raw accelerometer
output or zeros if there is no available data.
"""
return self.getData(0, 3)
def getGyroscope(self):
"""
Get a set of x, y, and z values of the current unprocessed
gyroscope signal.
Valid domain is [0, 4095].
Returns a three element array [x, y, z] of raw gyroscope
output or zeros if there is no available data.
"""
return self.getData(6, 3)
def getMagnetometer(self):
"""
Get a set of x, y, and z values of the current unprocessed
magnetometer signal.
Valid domain is [0, 4095].
Returns a three element array [x, y, z] of raw magnetometer
output or zeros if there is no available data.
"""
return self.getData(3, 3)
#
# class RawElement
#
def __Configurable(data):
"""
Convert a container of binary data into an associative
container of ConfigurableElement entries.
"""
return Format.__IdToValueArray(
data, 0, Format.ConfigurableElement, True)
Configurable = staticmethod(__Configurable)
def __Preview(data):
"""
Convert a container of binary data into an associative
container of PreviewElement entries.
"""
return Format.__IdToValueArray(data, 14, Format.PreviewElement, True)
Preview = staticmethod(__Preview)
def __Sensor(data):
"""
Convert a container of binary data into an associative
container of SensorElement entries.
"""
return Format.__IdToValueArray(data, 9, Format.SensorElement, True)
Sensor = staticmethod(__Sensor)
def __Raw(data):
"""
Convert a container of binary data into an associative
container of RawElement entries.
"""
return Format.__IdToValueArray(data, 9, Format.RawElement, False)
Raw = staticmethod(__Raw)
def __IdToValueArray(data, length, factory, real_valued):
"""
Utility method to convert a packed binary representation of
an associative container into that container.
"""
result = {}
if None == data:
return result
# Choose the binary format of the array values,
# "f" == float and "h" == short.
value_format = "f"
if False == real_valued:
value_format = "h"
# Prefix "<" for little-endian byte ordering.
sizeof_key = struct.calcsize("<I")
sizeof_value = struct.calcsize("<" + str(value_format))
itr = 0
while (itr < len(data)) and ((len(data) - itr) > sizeof_key):
# Read the integer id for this element.
key = struct.unpack("<I", data[itr:itr + sizeof_key])[0]
itr += sizeof_key
# Optionally read the integer length field of the
# data array.
element_length = length
if (0 == element_length) and ((len(data) - itr) > sizeof_key):
element_length = struct.unpack(
"<I", data[itr:itr + sizeof_key])[0]
itr += sizeof_key
# Read the array of values for this element.
sizeof_array = sizeof_value * element_length
if (element_length > 0) and ((len(data) - itr) >= sizeof_array):
                value = struct.unpack(
                    "<" + str(element_length) + value_format,
                    data[itr:itr + sizeof_array])
itr += sizeof_array
result[key] = factory(value)
# If we did not consume all of the input bytes this is an
# invalid message.
if len(data) != itr:
result = {}
return result
__IdToValueArray = staticmethod(__IdToValueArray)
def quaternion_to_R3_rotation(q):
"""
Ported from the Boost.Quaternion library at:
http://www.boost.org/libs/math/quaternion/HSO3.hpp
Parameter q defines a quaternion in the format [w x y z] where
q = w + x*i + y*j + z*k.
Returns an array of 16 elements that defines a 4-by-4 rotation
matrix computed from the input quaternion or the identity matrix
if the input quaternion has zero length. Matrix is in row-major
order.
"""
if 4 != len(q):
return None
a = q[0]
b = q[1]
c = q[2]
d = q[3]
aa = a * a
ab = a * b
ac = a * c
ad = a * d
bb = b * b
bc = b * c
bd = b * d
cc = c * c
cd = c * d
dd = d * d
norme_carre = aa + bb + cc + dd
result = list()
for i in range(0, 4):
for j in range(0, 4):
if i == j:
result.append(1)
else:
result.append(0)
if (norme_carre > 1e-6):
result[0] = (aa + bb - cc - dd) / norme_carre
result[1] = 2 * (-ad + bc) / norme_carre
result[2] = 2 * (ac + bd) / norme_carre
result[4] = 2 * (ad + bc) / norme_carre
result[5] = (aa - bb + cc - dd) / norme_carre
result[6] = 2 * (-ab + cd) / norme_carre
result[8] = 2 * (-ac + bd) / norme_carre
result[9] = 2 * (ab + cd) / norme_carre
result[10] = (aa - bb - cc + dd) / norme_carre
return result
quaternion_to_R3_rotation = staticmethod(quaternion_to_R3_rotation)
#
# END class Format
#
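# Illustrative sketches (not part of the original SDK). The first helper shows
# how the 14 float channels of a Preview element map onto the PreviewElement
# accessors: [0:4] global quaternion, [4:8] local quaternion, [8:11] local
# Euler angles, [11:14] linear acceleration. The second helper exercises
# Format.quaternion_to_R3_rotation: the identity quaternion [1, 0, 0, 0]
# yields the 4-by-4 identity matrix, and q = [cos(45 deg), 0, 0, sin(45 deg)]
# yields the row-major matrix of a 90 degree rotation about the z-axis,
# [0 -1 0 0; 1 0 0 0; 0 0 1 0; 0 0 0 1].
def _demo_preview_layout():
    data = tuple(float(i) for i in range(14))
    element = Format.PreviewElement(data)
    return (element.getQuaternion(False),  # channels 0-3
            element.getQuaternion(True),   # channels 4-7
            element.getEuler(),            # channels 8-10
            element.getAccelerate())       # channels 11-13
def _demo_quaternion_to_matrix():
    import math
    identity = Format.quaternion_to_R3_rotation([1, 0, 0, 0])
    half_sqrt2 = math.sqrt(0.5)
    z_90 = Format.quaternion_to_R3_rotation([half_sqrt2, 0, 0, half_sqrt2])
    return identity, z_90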
class LuaConsole:
"""
Implements the communication protocol with the Motion Service console.
Send general Lua scripting commands to the Motion Service and receive
a result code and printed results.
"""
# The Lua chunk was successfully parsed and executed. The
# printed results are in the result string.
Success = 0
# The Lua chunk failed due to a compile time or execution
# time error. An error description is in the result string.
Failure = 1
# The Lua chunk was incomplete. The Console service is waiting
# for a complete chunk before it executes.
    # For example, "if x > 1 then" is incomplete since it requires
    # an "end" token to close the "if" control statement.
Continue = 2
def __init__(self, client):
"""
Constructor. Supply the already open client socket connection
        to the Motion Service console.
"""
self.__client = client
def send_chunk(self, chunk, time_out_second=None):
"""
Write a general Lua chunk to the open Console service
socket and read back the results.
"""
result_code = self.Failure
result_string = None
# Write the Lua chunk.
if self.__client.writeData(chunk, time_out_second):
# Read back the response. This is how the Lua Console
# service works. It will always respond with at least
# an error code.
data = self.__client.readData(time_out_second)
if None != data and len(data) > 0:
if not isinstance(data, str):
data = str(data, "utf-8")
code = ord(data[0])
if code >= self.Success and code <= self.Continue:
result_code = code
if len(data) > 1:
result_string = data[1:]
return result_code, result_string
def __SendChunk(client, chunk, time_out_second=None):
"""
A more Python friendly version of the SendChunk method.
This will throw an exception if there is an error in the
scripting command. Otherwise, this will only return the
printed results.
"""
lua_console = LuaConsole(client)
result_code, result_string = lua_console.send_chunk(
chunk, time_out_second)
if lua_console.Success == result_code:
return result_string
elif lua_console.Continue == result_code:
raise RuntimeError(
"Lua chunk incomplete: " + str(result_string))
else:
raise RuntimeError(
"Lua command chunk failed: " + str(result_string))
SendChunk = staticmethod(__SendChunk)
class Node:
"""
Utility class to implement a generic scripting interface
from the Motion Service console (Lua) to Python and vice versa.
Dispatch named methods with variable length argument
lists.
Implements all Lua node.* methods that return a boolean
and string value pair. Also supports simple string result
but the client script must handle this correctly.
Example usage:
node = LuaConsole.Node(Client("", 32075))
result, message = node.start()
if result:
# Success. We are reading from at least one device.
pass
else:
# Failure. There are no configured devices or the
# hardware is not available.
            print(message)
"""
def __init__(self, client):
self.__client = client
def __getattr__(self, name):
if sys.version_info >= (2, 5):
return functools.partial(self.__dispatch, name)
else:
return None
def __dispatch(self, name, *arg_list):
result = self.__string_call(name, *arg_list)
if result.startswith("true"):
return True, result[4:]
elif result.startswith("false"):
return False, result[5:]
else:
return str(result)
def __string_call(self, name, *arg_list):
lua_call = "=node.%s(" % name
# Create a string valued argument list from a variable
# length list of arguments. Note that this only supports
# String and Float valued arguments.
sep = ""
for item in arg_list:
if isinstance(item, str):
lua_call += "%s%s" % (sep, "".join(
["'", ("\\'").join(i for i in item.split("'")), "'"]))
else:
lua_call += "%s%s" % (sep, float(item))
sep = ", "
lua_call += ")"
return LuaConsole.SendChunk(self.__client, lua_call, 5)
#
# END class Node
#
#
#
# END class LuaConsole
#
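# Illustrative sketch (not part of the original SDK): the Console service
# response parsed by LuaConsole.send_chunk is one status byte (Success=0,
# Failure=1, Continue=2) followed by the printed results or error text.
# The helper below parses a hand-built response string to show the framing;
# real responses come back through Client.readData.
def _demo_parse_console_response(data="\x00Hello from Lua\n"):
    result_code = ord(data[0])
    result_string = data[1:] if len(data) > 1 else None
    return result_code, result_string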
def main():
"""
Example usage and test function for the Client, File,
Format, and LuaConsole classes.
"""
# Open take data file in the Sensor format.
# Print out the calibrated gyroscope signals.
DataFile = "../../test_data/sensor.bin"
if None != DataFile:
take_file = File(DataFile)
while True:
data = take_file.readData(9, True)
if None == data:
break
print("%s\n" % str(Format.SensorElement(data).getGyroscope()))
    # Set the default host and port parameters. The SDK is
    # socket based, so any networked Motion Service is available.
Host = ""
PortPreview = 32079
PortConsole = 32075
#
# General Lua scripting interface.
#
lua_client = Client(Host, PortConsole)
lua_chunk = \
"if not node.is_reading() then" \
" node.close()" \
" node.scan()" \
" node.start()" \
" end" \
" if node.is_reading() then" \
" print('Reading from ' .. node.num_reading() .. ' device(s)')" \
" else" \
" print('Failed to start reading')" \
" end"
print(LuaConsole.SendChunk(lua_client, lua_chunk, 5))
# Scripting language compatibility class. Translate
# Python calls into Lua calls and send them to the
# console service.
if sys.version_info >= (2, 5):
node = LuaConsole.Node(lua_client)
print(node.is_reading())
# Connect to the Preview data service.
# Print out the Euler angle orientation output.
client = Client(Host, PortPreview)
print("Connected to %s:%d" % (Host, PortPreview))
if client.waitForData():
sample_count = 0
while sample_count < 100:
data = client.readData()
preview = Format.Preview(data)
if len(preview) > 0:
for item in preview.values():
print("Euler = %s" % str(item.getEuler()))
break
else:
break
sample_count += 1
else:
print("No current data available, giving up")
client.close()
if __name__ == "__main__":
sys.exit(main())
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates JavaScript source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_javascript_default_value = {
mojom.BOOL: "false",
mojom.INT8: "0",
mojom.UINT8: "0",
mojom.INT16: "0",
mojom.UINT16: "0",
mojom.INT32: "0",
mojom.UINT32: "0",
mojom.FLOAT: "0",
mojom.HANDLE: "null",
mojom.DCPIPE: "null",
mojom.DPPIPE: "null",
mojom.MSGPIPE: "null",
mojom.SHAREDBUFFER: "null",
mojom.NULLABLE_HANDLE: "null",
mojom.NULLABLE_DCPIPE: "null",
mojom.NULLABLE_DPPIPE: "null",
mojom.NULLABLE_MSGPIPE: "null",
mojom.NULLABLE_SHAREDBUFFER: "null",
mojom.INT64: "0",
mojom.UINT64: "0",
mojom.DOUBLE: "0",
mojom.STRING: "null",
mojom.NULLABLE_STRING: "null"
}
def JavaScriptType(kind):
if kind.imported_from:
return kind.imported_from["unique_name"] + "." + kind.name
return kind.name
def JavaScriptDefaultValue(field):
if field.default:
if mojom.IsStructKind(field.kind):
assert field.default == "default"
return "new %s()" % JavaScriptType(field.kind)
return ExpressionToText(field.default)
if field.kind in mojom.PRIMITIVES:
return _kind_to_javascript_default_value[field.kind]
if mojom.IsStructKind(field.kind):
return "null"
if mojom.IsArrayKind(field.kind):
return "null"
if mojom.IsMapKind(field.kind):
return "null"
if mojom.IsInterfaceKind(field.kind) or \
mojom.IsInterfaceRequestKind(field.kind):
return _kind_to_javascript_default_value[mojom.MSGPIPE]
if mojom.IsEnumKind(field.kind):
return "0"
def JavaScriptPayloadSize(packed):
packed_fields = packed.packed_fields
if not packed_fields:
return 0
last_field = packed_fields[-1]
offset = last_field.offset + last_field.size
pad = pack.GetPad(offset, 8)
return offset + pad
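# Illustrative sketch (not part of the generator): JavaScriptPayloadSize rounds
# the end offset of the last packed field up to the next 8-byte boundary, and
# pack.GetPad is assumed to return the number of padding bytes needed to get
# there. The standalone helper below shows the same arithmetic, e.g. an offset
# of 13 pads up to a payload size of 16.
def _demo_payload_alignment(offset, alignment=8):
  pad = (alignment - (offset % alignment)) % alignment
  return offset + pad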
_kind_to_codec_type = {
mojom.BOOL: "codec.Uint8",
mojom.INT8: "codec.Int8",
mojom.UINT8: "codec.Uint8",
mojom.INT16: "codec.Int16",
mojom.UINT16: "codec.Uint16",
mojom.INT32: "codec.Int32",
mojom.UINT32: "codec.Uint32",
mojom.FLOAT: "codec.Float",
mojom.HANDLE: "codec.Handle",
mojom.DCPIPE: "codec.Handle",
mojom.DPPIPE: "codec.Handle",
mojom.MSGPIPE: "codec.Handle",
mojom.SHAREDBUFFER: "codec.Handle",
mojom.NULLABLE_HANDLE: "codec.NullableHandle",
mojom.NULLABLE_DCPIPE: "codec.NullableHandle",
mojom.NULLABLE_DPPIPE: "codec.NullableHandle",
mojom.NULLABLE_MSGPIPE: "codec.NullableHandle",
mojom.NULLABLE_SHAREDBUFFER: "codec.NullableHandle",
mojom.INT64: "codec.Int64",
mojom.UINT64: "codec.Uint64",
mojom.DOUBLE: "codec.Double",
mojom.STRING: "codec.String",
mojom.NULLABLE_STRING: "codec.NullableString",
}
def CodecType(kind):
if kind in mojom.PRIMITIVES:
return _kind_to_codec_type[kind]
if mojom.IsStructKind(kind):
pointer_type = "NullablePointerTo" if mojom.IsNullableKind(kind) \
else "PointerTo"
return "new codec.%s(%s)" % (pointer_type, JavaScriptType(kind))
if mojom.IsArrayKind(kind):
array_type = "NullableArrayOf" if mojom.IsNullableKind(kind) else "ArrayOf"
array_length = "" if kind.length is None else ", %d" % kind.length
element_type = ElementCodecType(kind.kind)
return "new codec.%s(%s%s)" % (array_type, element_type, array_length)
if mojom.IsInterfaceKind(kind):
return "codec.%s" % ("NullableInterface" if mojom.IsNullableKind(kind)
else "Interface")
if mojom.IsInterfaceRequestKind(kind):
return CodecType(mojom.MSGPIPE)
if mojom.IsEnumKind(kind):
return _kind_to_codec_type[mojom.INT32]
if mojom.IsMapKind(kind):
map_type = "NullableMapOf" if mojom.IsNullableKind(kind) else "MapOf"
key_type = ElementCodecType(kind.key_kind)
value_type = ElementCodecType(kind.value_kind)
return "new codec.%s(%s, %s)" % (map_type, key_type, value_type)
return kind
def ElementCodecType(kind):
return "codec.PackedBool" if mojom.IsBoolKind(kind) else CodecType(kind)
def JavaScriptDecodeSnippet(kind):
if kind in mojom.PRIMITIVES:
return "decodeStruct(%s)" % CodecType(kind)
if mojom.IsStructKind(kind):
return "decodeStructPointer(%s)" % JavaScriptType(kind)
if mojom.IsMapKind(kind):
return "decodeMapPointer(%s, %s)" % \
(ElementCodecType(kind.key_kind), ElementCodecType(kind.value_kind))
if mojom.IsArrayKind(kind) and mojom.IsBoolKind(kind.kind):
return "decodeArrayPointer(codec.PackedBool)"
if mojom.IsArrayKind(kind):
return "decodeArrayPointer(%s)" % CodecType(kind.kind)
if mojom.IsInterfaceKind(kind):
return "decodeStruct(%s)" % CodecType(kind)
if mojom.IsInterfaceRequestKind(kind):
return JavaScriptDecodeSnippet(mojom.MSGPIPE)
if mojom.IsEnumKind(kind):
return JavaScriptDecodeSnippet(mojom.INT32)
def JavaScriptEncodeSnippet(kind):
if kind in mojom.PRIMITIVES:
return "encodeStruct(%s, " % CodecType(kind)
if mojom.IsStructKind(kind):
return "encodeStructPointer(%s, " % JavaScriptType(kind)
if mojom.IsMapKind(kind):
return "encodeMapPointer(%s, %s, " % \
(ElementCodecType(kind.key_kind), ElementCodecType(kind.value_kind))
if mojom.IsArrayKind(kind) and mojom.IsBoolKind(kind.kind):
return "encodeArrayPointer(codec.PackedBool, ";
if mojom.IsArrayKind(kind):
return "encodeArrayPointer(%s, " % CodecType(kind.kind)
if mojom.IsInterfaceKind(kind):
return "encodeStruct(%s, " % CodecType(kind)
if mojom.IsInterfaceRequestKind(kind):
return JavaScriptEncodeSnippet(mojom.MSGPIPE)
if mojom.IsEnumKind(kind):
return JavaScriptEncodeSnippet(mojom.INT32)
def JavaScriptFieldOffset(packed_field):
return "offset + codec.kStructHeaderSize + %s" % packed_field.offset
def JavaScriptNullableParam(packed_field):
return "true" if mojom.IsNullableKind(packed_field.field.kind) else "false"
def GetArrayExpectedDimensionSizes(kind):
expected_dimension_sizes = []
while mojom.IsArrayKind(kind):
expected_dimension_sizes.append(generator.ExpectedArraySize(kind) or 0)
kind = kind.kind
# Strings are serialized as variable-length arrays.
if (mojom.IsStringKind(kind)):
expected_dimension_sizes.append(0)
return expected_dimension_sizes
def JavaScriptValidateArrayParams(packed_field):
nullable = JavaScriptNullableParam(packed_field)
field_offset = JavaScriptFieldOffset(packed_field)
element_kind = packed_field.field.kind.kind
element_size = pack.PackedField.GetSizeForKind(element_kind)
expected_dimension_sizes = GetArrayExpectedDimensionSizes(
packed_field.field.kind)
element_type = ElementCodecType(element_kind)
return "%s, %s, %s, %s, %s, 0" % \
(field_offset, element_size, element_type, nullable,
expected_dimension_sizes)
def JavaScriptValidateStructParams(packed_field):
nullable = JavaScriptNullableParam(packed_field)
field_offset = JavaScriptFieldOffset(packed_field)
struct_type = JavaScriptType(packed_field.field.kind)
return "%s, %s, %s" % (field_offset, struct_type, nullable)
def JavaScriptValidateMapParams(packed_field):
nullable = JavaScriptNullableParam(packed_field)
field_offset = JavaScriptFieldOffset(packed_field)
keys_type = ElementCodecType(packed_field.field.kind.key_kind)
values_kind = packed_field.field.kind.value_kind;
values_type = ElementCodecType(values_kind)
values_nullable = "true" if mojom.IsNullableKind(values_kind) else "false"
return "%s, %s, %s, %s, %s" % \
(field_offset, nullable, keys_type, values_type, values_nullable)
def JavaScriptValidateStringParams(packed_field):
nullable = JavaScriptNullableParam(packed_field)
return "%s, %s" % (JavaScriptFieldOffset(packed_field), nullable)
def JavaScriptValidateHandleParams(packed_field):
nullable = JavaScriptNullableParam(packed_field)
field_offset = JavaScriptFieldOffset(packed_field)
return "%s, %s" % (field_offset, nullable)
def JavaScriptValidateInterfaceParams(packed_field):
return JavaScriptValidateHandleParams(packed_field)
def JavaScriptProxyMethodParameterValue(parameter):
name = parameter.name;
if (mojom.IsInterfaceKind(parameter.kind)):
type = JavaScriptType(parameter.kind)
return "core.isHandle(%s) ? %s : connection.bindImpl" \
"(%s, %s)" % (name, name, name, type)
if (mojom.IsInterfaceRequestKind(parameter.kind)):
type = JavaScriptType(parameter.kind.kind)
return "core.isHandle(%s) ? %s : connection.bindProxy" \
"(%s, %s)" % (name, name, name, type)
return name;
def JavaScriptStubMethodParameterValue(parameter):
name = parameter.name;
if (mojom.IsInterfaceKind(parameter.kind)):
type = JavaScriptType(parameter.kind)
return "connection.bindHandleToProxy(%s, %s)" % (name, type)
if (mojom.IsInterfaceRequestKind(parameter.kind)):
type = JavaScriptType(parameter.kind.kind)
return "connection.bindHandleToStub(%s, %s)" % (name, type)
return name;
def TranslateConstants(token):
if isinstance(token, (mojom.EnumValue, mojom.NamedValue)):
# Both variable and enum constants are constructed like:
# NamespaceUid.Struct[.Enum].CONSTANT_NAME
name = []
if token.imported_from:
name.append(token.imported_from["unique_name"])
if token.parent_kind:
name.append(token.parent_kind.name)
if isinstance(token, mojom.EnumValue):
name.append(token.enum.name)
name.append(token.name)
return ".".join(name)
if isinstance(token, mojom.BuiltinValue):
if token.value == "double.INFINITY" or token.value == "float.INFINITY":
return "Infinity";
if token.value == "double.NEGATIVE_INFINITY" or \
token.value == "float.NEGATIVE_INFINITY":
return "-Infinity";
if token.value == "double.NAN" or token.value == "float.NAN":
return "NaN";
return token
def ExpressionToText(value):
return TranslateConstants(value)
def IsArrayPointerField(field):
return mojom.IsArrayKind(field.kind)
def IsStringPointerField(field):
return mojom.IsStringKind(field.kind)
def IsStructPointerField(field):
return mojom.IsStructKind(field.kind)
def IsMapPointerField(field):
return mojom.IsMapKind(field.kind)
def IsHandleField(field):
return mojom.IsAnyHandleKind(field.kind)
def IsInterfaceField(field):
return mojom.IsInterfaceKind(field.kind)
class Generator(generator.Generator):
js_filters = {
"default_value": JavaScriptDefaultValue,
"payload_size": JavaScriptPayloadSize,
"decode_snippet": JavaScriptDecodeSnippet,
"encode_snippet": JavaScriptEncodeSnippet,
"expression_to_text": ExpressionToText,
"field_offset": JavaScriptFieldOffset,
"has_callbacks": mojom.HasCallbacks,
"is_array_pointer_field": IsArrayPointerField,
"is_map_pointer_field": IsMapPointerField,
"is_struct_pointer_field": IsStructPointerField,
"is_string_pointer_field": IsStringPointerField,
"is_handle_field": IsHandleField,
"is_interface_field": IsInterfaceField,
"js_type": JavaScriptType,
"js_proxy_method_parameter_value": JavaScriptProxyMethodParameterValue,
"js_stub_method_parameter_value": JavaScriptStubMethodParameterValue,
"stylize_method": generator.StudlyCapsToCamel,
"validate_array_params": JavaScriptValidateArrayParams,
"validate_handle_params": JavaScriptValidateHandleParams,
"validate_interface_params": JavaScriptValidateInterfaceParams,
"validate_map_params": JavaScriptValidateMapParams,
"validate_string_params": JavaScriptValidateStringParams,
"validate_struct_params": JavaScriptValidateStructParams,
}
def GetParameters(self):
return {
"namespace": self.module.namespace,
"imports": self.GetImports(),
"kinds": self.module.kinds,
"enums": self.module.enums,
"module": self.module,
"structs": self.GetStructs() + self.GetStructsFromMethods(),
"interfaces": self.module.interfaces,
"imported_interfaces": self.GetImportedInterfaces(),
}
@UseJinja("js_templates/module.amd.tmpl", filters=js_filters)
def GenerateAMDModule(self):
return self.GetParameters()
def GenerateFiles(self, args):
self.Write(self.GenerateAMDModule(),
self.MatchMojomFilePath("%s.js" % self.module.name))
def GetImports(self):
used_names = set()
for each_import in self.module.imports:
simple_name = each_import["module_name"].split(".")[0]
# Since each import is assigned a variable in JS, they need to have unique
# names.
unique_name = simple_name
counter = 0
while unique_name in used_names:
counter += 1
unique_name = simple_name + str(counter)
used_names.add(unique_name)
each_import["unique_name"] = unique_name + "$"
counter += 1
return self.module.imports
def GetImportedInterfaces(self):
interface_to_import = {};
for each_import in self.module.imports:
for each_interface in each_import["module"].interfaces:
name = each_interface.name
interface_to_import[name] = each_import["unique_name"] + "." + name
return interface_to_import;
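# Illustrative sketch (not part of the generator): GetImports above assigns
# each import a unique JavaScript variable name by suffixing a counter when
# the simple module name collides, then appending "$". The standalone helper
# below reproduces that naming scheme, e.g. ["foo.mojom", "foo.other", "bar"]
# maps to ["foo$", "foo1$", "bar$"].
def _demo_unique_import_names(module_names):
  used_names = set()
  unique_names = []
  for module_name in module_names:
    simple_name = module_name.split(".")[0]
    unique_name = simple_name
    counter = 0
    while unique_name in used_names:
      counter += 1
      unique_name = simple_name + str(counter)
    used_names.add(unique_name)
    unique_names.append(unique_name + "$")
  return unique_names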
|
|
__author__ = 'Sarath'
from pyNN import *
import time
from pyNN.optimization.optimization import *
from pyNN.util.Initializer import *
import pickle
class DeepCorrNet1(object):
def init(self, numpy_rng, theano_rng=None, l_rate=0.01, optimization="sgd", tied=False, n_visible_left=None, n_visible_right=None, n_hidden=None, n_hidden2=None, lamda=5, W_left=None, W_right=None, b_left=None, b_right=None, W_left_prime=None, W_right_prime=None, b_prime_left=None, b_prime_right=None, W_left2=None, W_right2=None, b2=None, W_left_prime2=None, W_right_prime2=None, b_prime_left2=None, b_prime_right2=None, input_left=None, input_right=None, hidden_activation="sigmoid", output_activation="sigmoid", loss_fn = "squarrederror", op_folder=None):
self.numpy_rng = numpy_rng
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.theano_rng = theano_rng
self.optimization = optimization
self.l_rate = l_rate
self.optimizer = get_optimizer(self.optimization, self.l_rate)
self.Initializer = Initializer(self.numpy_rng)
self.n_visible_left = n_visible_left
self.n_visible_right = n_visible_right
self.n_hidden = n_hidden
self.n_hidden2 = n_hidden2
self.lamda = lamda
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.loss_fn = loss_fn
self.tied = tied
self.op_folder = op_folder
self.W_left = self.Initializer.fan_based_sigmoid("W_left", W_left, n_visible_left, n_hidden)
self.optimizer.register_variable("W_left",n_visible_left,n_hidden)
self.W_right = self.Initializer.fan_based_sigmoid("W_right", W_right, n_visible_right, n_hidden)
self.optimizer.register_variable("W_right",n_visible_right,n_hidden)
self.W_left2 = self.Initializer.fan_based_sigmoid("W_left2", W_left2, n_hidden, n_hidden2)
self.optimizer.register_variable("W_left2",n_hidden, n_hidden2)
self.W_right2 = self.Initializer.fan_based_sigmoid("W_right2", W_right2, n_hidden, n_hidden2)
self.optimizer.register_variable("W_right2", n_hidden, n_hidden2)
if not tied:
self.W_left_prime = self.Initializer.fan_based_sigmoid("W_left_prime", W_left_prime, n_hidden, n_visible_left)
self.optimizer.register_variable("W_left_prime",n_hidden, n_visible_left)
self.W_right_prime = self.Initializer.fan_based_sigmoid("W_right_prime", W_right_prime, n_hidden, n_visible_right)
self.optimizer.register_variable("W_right_prime",n_hidden, n_visible_right)
self.W_left_prime2 = self.Initializer.fan_based_sigmoid("W_left_prime2", W_left_prime2, n_hidden2, n_hidden)
self.optimizer.register_variable("W_left_prime2",n_hidden2, n_hidden)
self.W_right_prime2 = self.Initializer.fan_based_sigmoid("W_right_prime2", W_right_prime2, n_hidden2, n_hidden)
self.optimizer.register_variable("W_right_prime2",n_hidden2, n_hidden)
else:
self.W_left_prime = self.W_left.T
self.W_right_prime = self.W_right.T
self.W_left_prime2 = self.W_left2.T
self.W_right_prime2 = self.W_right2.T
self.b_left = self.Initializer.zero_vector("b_left", b_left, n_hidden)
self.optimizer.register_variable("b_left",1,n_hidden)
self.b_right = self.Initializer.zero_vector("b_right", b_right, n_hidden)
self.optimizer.register_variable("b_right",1,n_hidden)
self.b_prime_left = self.Initializer.zero_vector("b_prime_left", b_prime_left, n_visible_left)
self.optimizer.register_variable("b_prime_left",1,n_visible_left)
self.b_prime_right = self.Initializer.zero_vector("b_prime_right", b_prime_right, n_visible_right)
self.optimizer.register_variable("b_prime_right",1,n_visible_right)
self.b2 = self.Initializer.zero_vector("b2", b2, n_hidden2)
self.optimizer.register_variable("b2",1,n_hidden2)
self.b_prime_left2 = self.Initializer.zero_vector("b_prime_left2", b_prime_left2, n_hidden)
self.optimizer.register_variable("b_prime_left2",1,n_hidden)
self.b_prime_right2 = self.Initializer.zero_vector("b_prime_right2", b_prime_right2, n_hidden)
self.optimizer.register_variable("b_prime_right2",1,n_hidden)
if input_left is None:
self.x_left = T.matrix(name='x_left')
else:
self.x_left = input_left
if input_right is None:
self.x_right = T.matrix(name='x_right')
else:
self.x_right = input_right
if tied:
self.params = [self.W_left, self.W_right, self.b_left, self.b_right, self.b_prime_left, self.b_prime_right, self.W_left2, self.W_right2, self.b2, self.b_prime_left2, self.b_prime_right2]
self.param_names = ["W_left", "W_right", "b_left", "b_right", "b_prime_left", "b_prime_right", "W_left2", "W_right2", "b2", "b_prime_left2", "b_prime_right2"]
else:
self.params = [self.W_left, self.W_right, self.b_left, self.b_right, self.b_prime_left, self.b_prime_right, self.W_left_prime, self.W_right_prime, self.W_left2, self.W_right2, self.b2, self.b_prime_left2, self.b_prime_right2, self.W_left_prime2, self.W_right_prime2]
self.param_names = ["W_left", "W_right", "b_left", "b_right", "b_prime_left", "b_prime_right", "W_left_prime", "W_right_prime", "W_left2", "W_right2", "b2", "b_prime_left2", "b_prime_right2", "W_left_prime2", "W_right_prime2"]
self.proj_from_left = theano.function([self.x_left],self.project_from_left())
self.proj_from_right = theano.function([self.x_right],self.project_from_right())
self.recon_from_left = theano.function([self.x_left],self.reconstruct_from_left())
self.recon_from_right = theano.function([self.x_right],self.reconstruct_from_right())
self.save_params()
def train_common(self,mtype="1111"):
y1_pre = T.dot(self.x_left, self.W_left) + self.b_left
y1 = activation(y1_pre, self.hidden_activation)
yy1_pre = T.dot(y1, self.W_left2) + self.b2
yy1 = activation(yy1_pre, self.hidden_activation)
z1_left_pre = T.dot(yy1, self.W_left_prime2) + self.b_prime_left2
z1_right_pre = T.dot(yy1,self.W_right_prime2) + self.b_prime_right2
z1_left = activation(z1_left_pre, self.output_activation)
z1_right = activation(z1_right_pre, self.output_activation)
zz1_left_pre = T.dot(z1_left, self.W_left_prime) + self.b_prime_left
zz1_right_pre = T.dot(z1_right,self.W_right_prime) + self.b_prime_right
zz1_left = activation(zz1_left_pre, self.output_activation)
zz1_right = activation(zz1_right_pre, self.output_activation)
L1 = loss(zz1_left, self.x_left, self.loss_fn) + loss(zz1_right, self.x_right, self.loss_fn)
y2_pre = T.dot(self.x_right, self.W_right) + self.b_right
y2 = activation(y2_pre, self.hidden_activation)
yy2_pre = T.dot(y2, self.W_right2) + self.b2
yy2 = activation(yy2_pre, self.hidden_activation)
z2_left_pre = T.dot(yy2, self.W_left_prime2) + self.b_prime_left2
z2_right_pre = T.dot(yy2,self.W_right_prime2) + self.b_prime_right2
z2_left = activation(z2_left_pre, self.output_activation)
z2_right = activation(z2_right_pre, self.output_activation)
zz2_left_pre = T.dot(z2_left, self.W_left_prime) + self.b_prime_left
zz2_right_pre = T.dot(z2_right,self.W_right_prime) + self.b_prime_right
zz2_left = activation(zz2_left_pre, self.output_activation)
zz2_right = activation(zz2_right_pre, self.output_activation)
L2 = loss(zz2_left, self.x_left, self.loss_fn) + loss(zz2_right, self.x_right, self.loss_fn)
y3left_pre = T.dot(self.x_left, self.W_left) + self.b_left
y3right_pre = T.dot(self.x_right, self.W_right) + self.b_right
y3left = activation(y3left_pre, self.hidden_activation)
y3right = activation(y3right_pre, self.hidden_activation)
y3_pre = T.dot(y3left, self.W_left2) + T.dot(y3right, self.W_right2) + self.b2
y3 = activation(y3_pre, self.hidden_activation)
z3_left_pre = T.dot(y3, self.W_left_prime2) + self.b_prime_left2
z3_right_pre = T.dot(y3,self.W_right_prime2) + self.b_prime_right2
z3_left = activation(z3_left_pre, self.output_activation)
z3_right = activation(z3_right_pre, self.output_activation)
zz3_left_pre = T.dot(z3_left, self.W_left_prime) + self.b_prime_left
zz3_right_pre = T.dot(z3_right,self.W_right_prime) + self.b_prime_right
zz3_left = activation(zz3_left_pre, self.output_activation)
zz3_right = activation(zz3_right_pre, self.output_activation)
L3 = loss(zz3_left, self.x_left, self.loss_fn) + loss(zz3_right, self.x_right, self.loss_fn)
y1_mean = T.mean(yy1, axis=0)
y1_centered = yy1 - y1_mean
y2_mean = T.mean(yy2, axis=0)
y2_centered = yy2 - y2_mean
corr_nr = T.sum(y1_centered * y2_centered, axis=0)
corr_dr1 = T.sqrt(T.sum(y1_centered * y1_centered, axis=0)+1e-8)
corr_dr2 = T.sqrt(T.sum(y2_centered * y2_centered, axis=0)+1e-8)
corr_dr = corr_dr1 * corr_dr2
corr = corr_nr/corr_dr
L4 = T.sum(corr) * self.lamda
if mtype=="1111":
print "1111"
L = L1 + L2 + L3 - L4
elif mtype=="1110":
print "1110"
L = L1 + L2 + L3
elif mtype=="1101":
print "1101"
L = L1 + L2 - L4
elif mtype == "0011":
print "0011"
L = L3 - L4
elif mtype == "1100":
print "1100"
L = L1 + L2
elif mtype == "0010":
print "0010"
L = L3
cost = T.mean(L)
gradients = T.grad(cost, self.params)
updates = []
for p,g,n in zip(self.params, gradients, self.param_names):
gr, upd = self.optimizer.get_grad_update(n,g)
updates.append((p,p+gr))
updates.extend(upd)
return cost, updates
def project_from_left(self):
y1_pre = T.dot(self.x_left, self.W_left) + self.b_left
y1 = activation(y1_pre, self.hidden_activation)
yy1_pre = T.dot(y1, self.W_left2) + self.b2
yy1 = activation(yy1_pre, self.hidden_activation)
return yy1
def project_from_right(self):
y2_pre = T.dot(self.x_right, self.W_right) + self.b_right
y2 = activation(y2_pre, self.hidden_activation)
yy2_pre = T.dot(y2, self.W_right2) + self.b2
yy2 = activation(yy2_pre, self.hidden_activation)
return yy2
def reconstruct_from_left(self):
y1_pre = T.dot(self.x_left, self.W_left) + self.b_left
y1 = activation(y1_pre, self.hidden_activation)
yy1_pre = T.dot(y1, self.W_left2) + self.b2
yy1 = activation(yy1_pre, self.hidden_activation)
z1_left_pre = T.dot(yy1, self.W_left_prime2) + self.b_prime_left2
z1_right_pre = T.dot(yy1,self.W_right_prime2) + self.b_prime_right2
z1_left = activation(z1_left_pre, self.output_activation)
z1_right = activation(z1_right_pre, self.output_activation)
zz1_left_pre = T.dot(z1_left, self.W_left_prime) + self.b_prime_left
zz1_right_pre = T.dot(z1_right,self.W_right_prime) + self.b_prime_right
zz1_left = activation(zz1_left_pre, self.output_activation)
zz1_right = activation(zz1_right_pre, self.output_activation)
return zz1_left, zz1_right
def reconstruct_from_right(self):
y2_pre = T.dot(self.x_right, self.W_right) + self.b_right
y2 = activation(y2_pre, self.hidden_activation)
yy2_pre = T.dot(y2, self.W_right2) + self.b2
yy2 = activation(yy2_pre, self.hidden_activation)
z2_left_pre = T.dot(yy2, self.W_left_prime2) + self.b_prime_left2
z2_right_pre = T.dot(yy2,self.W_right_prime2) + self.b_prime_right2
z2_left = activation(z2_left_pre, self.output_activation)
z2_right = activation(z2_right_pre, self.output_activation)
zz2_left_pre = T.dot(z2_left, self.W_left_prime) + self.b_prime_left
zz2_right_pre = T.dot(z2_right,self.W_right_prime) + self.b_prime_right
zz2_left = activation(zz2_left_pre, self.output_activation)
zz2_right = activation(zz2_right_pre, self.output_activation)
return zz2_left, zz2_right
def get_lr_rate(self):
return self.optimizer.get_l_rate()
def set_lr_rate(self,new_lr):
self.optimizer.set_l_rate(new_lr)
def save_matrices(self):
for p,nm in zip(self.params, self.param_names):
numpy.save(self.op_folder+nm, p.get_value(borrow=True))
def save_params(self):
params = {}
params["optimization"] = self.optimization
params["l_rate"] = self.l_rate
params["n_visible_left"] = self.n_visible_left
params["n_visible_right"] = self.n_visible_right
params["n_hidden"] = self.n_hidden
params["n_hidden2"] = self.n_hidden2
params["lamda"] = self.lamda
params["hidden_activation"] = self.hidden_activation
params["output_activation"] = self.output_activation
params["loss_fn"] = self.loss_fn
params["tied"] = self.tied
params["numpy_rng"] = self.numpy_rng
params["theano_rng"] = self.theano_rng
pickle.dump(params,open(self.op_folder+"params.pck","wb"),-1)
def load(self, folder, input_left=None, input_right=None):
plist = pickle.load(open(folder+"params.pck","rb"))
print plist["n_hidden"]
print type(plist["n_hidden"])
self.init(plist["numpy_rng"], theano_rng=plist["theano_rng"], l_rate=plist["l_rate"], optimization=plist["optimization"],
tied=plist["tied"], n_visible_left=plist["n_visible_left"], n_visible_right=plist["n_visible_right"], n_hidden=plist["n_hidden"], n_hidden2=plist["n_hidden2"],
lamda=plist["lamda"], W_left=folder+"W_left", W_right=folder+"W_right", b_left=folder+"b_left", b_right=folder+"b_right", W_left_prime=folder+"W_left_prime",
W_right_prime=folder+"W_right_prime", b_prime_left=folder+"b_prime_left", b_prime_right=folder+"b_prime_right",
W_left2=folder+"W_left2", W_right2=folder+"W_right2", b2=folder+"b2", W_left_prime2=folder+"W_left_prime2",
W_right_prime2=folder+"W_right_prime2", b_prime_left2=folder+"b_prime_left2", b_prime_right2=folder+"b_prime_right2",
input_left=input_left, input_right=input_right, hidden_activation=plist["hidden_activation"], output_activation=plist["output_activation"],
loss_fn = plist["loss_fn"], op_folder=folder)
def trainCorrNet(src_folder, sct_folder, tgt_folder, batch_size = 20, training_epochs=40, use_valid=False, l_rate=0.01, optimization="sgd", tied=False, n_visible_left=None, n_visible_right=None, n_hidden=None, n_hidden2=None, lamda=5, W_left=None, W_right=None, b_left=None, b_right=None, W_left_prime=None, W_right_prime=None, b_prime_left=None, b_prime_right=None, W_left2=None, W_right2=None, b2=None, W_left_prime2=None, W_right_prime2=None, b_prime_left2=None, b_prime_right2=None, hidden_activation="sigmoid", output_activation="sigmoid", loss_fn = "squarrederror"):
index = T.lscalar()
x_left = T.matrix('x_left')
x_right = T.matrix('x_right')
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
model = DeepCorrNet1()
model.init(numpy_rng=rng, theano_rng=theano_rng, l_rate=l_rate, optimization=optimization, tied=tied, n_visible_left=n_visible_left, n_visible_right=n_visible_right, n_hidden=n_hidden,n_hidden2=n_hidden2, lamda=lamda, W_left=W_left, W_right=W_right, b_left=b_left, b_right=b_right, W_left_prime=W_left_prime, W_right_prime=W_right_prime, b_prime_left=b_prime_left, b_prime_right=b_prime_right, W_left2=W_left2, W_right2=W_right2, b2=b2, W_left_prime2=W_left_prime2, W_right_prime2=W_right_prime2, b_prime_left2=b_prime_left2, b_prime_right2=b_prime_right2, input_left=x_left, input_right=x_right, hidden_activation=hidden_activation, output_activation=output_activation, loss_fn =loss_fn, op_folder=tgt_folder)
#model.load(tgt_folder,x_left,x_right)
start_time = time.clock()
train_set_x_left = theano.shared(numpy.asarray(numpy.zeros((1000,n_visible_left)), dtype=theano.config.floatX), borrow=True)
train_set_x_right = theano.shared(numpy.asarray(numpy.zeros((1000,n_visible_right)), dtype=theano.config.floatX), borrow=True)
common_cost, common_updates = model.train_common("1111")
mtrain_common = theano.function([index], common_cost,updates=common_updates,givens=[(x_left, train_set_x_left[index * batch_size:(index + 1) * batch_size]),(x_right, train_set_x_right[index * batch_size:(index + 1) * batch_size])])
"""left_cost, left_updates = model.train_left()
mtrain_left = theano.function([index], left_cost,updates=left_updates,givens=[(x_left, train_set_x_left[index * batch_size:(index + 1) * batch_size])])
right_cost, right_updates = model.train_right()
mtrain_right = theano.function([index], right_cost,updates=right_updates,givens=[(x_right, train_set_x_right[index * batch_size:(index + 1) * batch_size])])"""
diff = 0
flag = 1
detfile = open(tgt_folder+"details.txt","w")
detfile.close()
oldtc = float("inf")
for epoch in xrange(training_epochs):
print "in epoch ", epoch
c = []
ipfile = open(src_folder+"train/ip.txt","r")
for line in ipfile:
next = line.strip().split(",")
if(next[0]=="xy"):
if(next[1]=="dense"):
denseTheanoloader(next[2]+"_left",train_set_x_left,"float32")
denseTheanoloader(next[2]+"_right",train_set_x_right, "float32")
else:
sparseTheanoloader(next[2]+"_left",train_set_x_left,"float32",1000,n_visible_left)
sparseTheanoloader(next[2]+"_right",train_set_x_right, "float32", 1000, n_visible_right)
for batch_index in range(0,int(next[3])/batch_size):
c.append(mtrain_common(batch_index))
if(flag==1):
flag = 0
diff = numpy.mean(c)
di = diff
else:
di = numpy.mean(c) - diff
diff = numpy.mean(c)
print 'Difference between 2 epochs is ', di
print 'Training epoch %d, cost ' % epoch, diff
ipfile.close()
detfile = open(tgt_folder+"details.txt","a")
detfile.write("train\t"+str(diff)+"\n")
detfile.close()
# save the parameters for every 5 epochs
if((epoch+1)%5==0):
model.save_matrices()
end_time = time.clock()
training_time = (end_time - start_time)
print ' code ran for %.2fm' % (training_time / 60.)
model.save_matrices()
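# Illustrative sketch (not part of the original training code): the L4 term in
# DeepCorrNet1.train_common is the per-dimension Pearson correlation between
# the two views' common hidden representations, summed over dimensions and
# scaled by lamda; it is subtracted from the reconstruction losses so that
# maximizing correlation lowers the cost. The numpy helper below computes the
# same quantity for two (batch x hidden) arrays.
import numpy
def _demo_correlation_term(y1, y2, lamda=5):
    y1_centered = y1 - y1.mean(axis=0)
    y2_centered = y2 - y2.mean(axis=0)
    corr_nr = (y1_centered * y2_centered).sum(axis=0)
    corr_dr = (numpy.sqrt((y1_centered ** 2).sum(axis=0) + 1e-8) *
               numpy.sqrt((y2_centered ** 2).sum(axis=0) + 1e-8))
    return lamda * (corr_nr / corr_dr).sum()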
|
|
# Copyright (c) 2010 Infrae / Technical University Delft. All rights reserved.
# See also LICENSE.txt
from collections import defaultdict
from lxml import etree
from fcrepo.utils import rdfxml2dict, dict2rdfxml
class typedproperty(property):
def __init__(self, fget, fset=None, fdel=None, doc=None, pytype=None):
# like a normal property, but converts types to/from strings
def typed_get(self):
if pytype is bool:
value = fget(self)
if isinstance(value, bool):
return value
return fget(self) == 'true'
return pytype(fget(self))
def typed_set(self, value):
# we don't change the type here, this is done in wadl client
# otherwise the wadl client can't determine the correct type
return fset(self, value)
super(typedproperty, self).__init__(typed_get, typed_set, fdel, doc)
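# Illustrative sketch (not part of the fcrepo client): typedproperty reads
# string values from the profile dictionary and converts them on access,
# e.g. 'true' -> True for pytype=bool and '2048' -> 2048 for pytype=int.
# The hypothetical _TypedExample class below shows the conversion without
# talking to a Fedora server.
class _TypedExample(object):
    def __init__(self):
        self._info = {'versionable': 'true', 'size': '2048'}
    versionable = typedproperty(lambda self: self._info['versionable'],
                                pytype=bool)
    size = typedproperty(lambda self: self._info['size'], pytype=int)
# _TypedExample().versionable == True and _TypedExample().size == 2048.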
class FedoraDatastream(object):
def __init__(self, dsid, object):
self.object = object
self.dsid = dsid
self._info = self.object.client.getDatastreamProfile(self.object.pid,
self.dsid)
def delete(self, **params):
self.object.client.deleteDatastream(self.object.pid,
self.dsid,
**params)
self.object._dsids = None
def getContent(self):
return self.object.client.getDatastream(self.object.pid, self.dsid)
def setContent(self, data='', **params):
if self._info['controlGroup'] == 'X':
# for some reason we need to add 2 characters to the body
# or we get a parsing error in fedora
data += '\r\n'
self.object.client.modifyDatastream(self.object.pid,
self.dsid,
data,
**params)
self._info = self.object.client.getDatastreamProfile(self.object.pid,
self.dsid)
def _setProperty(self, name, value):
msg = u'Changed %s datastream property' % name
name = {'label': 'dsLabel',
'location': 'dsLocation',
'state': 'dsState'}.get(name, name)
params = {name: value, 'logMessage': msg, 'ignoreContent': True}
self.object.client.modifyDatastream(self.object.pid,
self.dsid,
**params)
self._info = self.object.client.getDatastreamProfile(self.object.pid,
self.dsid)
label = property(lambda self: self._info['label'],
lambda self, value: self._setProperty('label', value))
location = property(lambda self: self._info['location'],
lambda self, value: self._setProperty('location', value))
state = property(lambda self: self._info['state'],
lambda self, value: self._setProperty('state',
value))
checksumType = property(lambda self: self._info['checksumType'],
lambda self, value: self._setProperty('checksumType',
value))
versionId = property(lambda self: self._info['versionId'],
lambda self, value: self._setProperty('versionId',
value))
mimeType = property(lambda self: self._info['mimeType'],
lambda self, value: self._setProperty('mimeType',
value))
formatURI = property(lambda self: self._info['formatURI'],
lambda self, value: self._setProperty('formatURI',
value))
versionable = typedproperty(lambda self: self._info['versionable'],
lambda self, value: self._setProperty(
'versionable', value), pytype=bool)
# read only
createdDate = property(lambda self: self._info['createdDate'])
controlGroup = property(lambda self: self._info['controlGroup'])
size = typedproperty(lambda self: self._info['size'], pytype=int)
    checksum = property(lambda self: self._info['checksum'])
class RELSEXTDatastream(FedoraDatastream):
def __init__(self, dsid, object):
super(RELSEXTDatastream, self).__init__(dsid, object)
self._rdf = None
def _get_rdf(self):
if self._rdf is None:
rdfxml = self.getContent().read()
self._rdf = rdfxml2dict(rdfxml)
return self._rdf
def keys(self):
rdf = self._get_rdf()
keys = rdf.keys()
keys.sort()
return keys
predicates = keys
def setContent(self, data='', **params):
if not data:
rdf = self._get_rdf()
data = dict2rdfxml(self.object.pid, rdf)
self._rdf = None
super(RELSEXTDatastream, self).setContent(data, **params)
def __setitem__(self, key, value):
rdf = self._get_rdf()
rdf[key]=value
def __getitem__(self, key):
rdf = self._get_rdf()
return rdf[key]
def __delitem__(self, key):
rdf = self._get_rdf()
del rdf[key]
def __contains__(self, key):
rdf = self._get_rdf()
return key in rdf
def __iter__(self):
rdf = self._get_rdf()
return rdf.__iter__()
class DCDatastream(FedoraDatastream):
def __init__(self, dsid, object):
super(DCDatastream, self).__init__(dsid, object)
self._dc = None
def _get_dc(self):
if self._dc is None:
xml = self.getContent().read()
doc = etree.fromstring(xml)
self._dc = defaultdict(list)
for child in doc:
name = child.tag.split('}')[-1]
value = child.text
if value is None:
continue
if not isinstance(value, unicode):
value = value.decode('utf8')
self._dc[name].append(value)
return self._dc
def keys(self):
dc = self._get_dc()
keys = dc.keys()
keys.sort()
return keys
properties = keys
def setContent(self, data='', **params):
if not data:
dc = self._get_dc()
nsmap = {'dc': 'http://purl.org/dc/elements/1.1/',
'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/'}
doc = etree.Element('{%s}dc' % nsmap['oai_dc'], nsmap=nsmap)
for key, values in dc.items():
for value in values:
el = etree.SubElement(doc, '{%s}%s' % (nsmap['dc'], key))
el.text = value
data = etree.tostring(doc, encoding="UTF-8",
pretty_print=True, xml_declaration=False)
self._dc = None
super(DCDatastream, self).setContent(data, **params)
def __setitem__(self, key, value):
dc = self._get_dc()
dc[key]=value
def __getitem__(self, key):
dc = self._get_dc()
return dc[key]
def __delitem__(self, key):
dc = self._get_dc()
del dc[key]
def __contains__(self, key):
dc = self._get_dc()
return key in dc
def __iter__(self):
dc = self._get_dc()
return dc.__iter__()
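# Illustrative sketch (not part of the fcrepo client): DCDatastream.setContent
# serializes the property dictionary into an oai_dc document with one dc
# element per value. The standalone helper below builds the same structure
# with lxml, e.g. {'title': [u'My Object']} produces
# <oai_dc:dc ...><dc:title>My Object</dc:title></oai_dc:dc>.
def _demo_dc_xml(dc):
    nsmap = {'dc': 'http://purl.org/dc/elements/1.1/',
             'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/'}
    doc = etree.Element('{%s}dc' % nsmap['oai_dc'], nsmap=nsmap)
    for key, values in dc.items():
        for value in values:
            el = etree.SubElement(doc, '{%s}%s' % (nsmap['dc'], key))
            el.text = value
    return etree.tostring(doc, encoding="UTF-8", pretty_print=True,
                          xml_declaration=False)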
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from wandb.proto import wandb_internal_pb2 as wandb_dot_proto_dot_wandb__internal__pb2
from wandb.proto import wandb_server_pb2 as wandb_dot_proto_dot_wandb__server__pb2
from wandb.proto import wandb_telemetry_pb2 as wandb_dot_proto_dot_wandb__telemetry__pb2
class InternalServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.RunUpdate = channel.unary_unary(
'/wandb_internal.InternalService/RunUpdate',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunUpdateResult.FromString,
)
self.Attach = channel.unary_unary(
'/wandb_internal.InternalService/Attach',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.AttachRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.AttachResponse.FromString,
)
self.TBSend = channel.unary_unary(
'/wandb_internal.InternalService/TBSend',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.TBRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.TBResult.FromString,
)
self.RunStart = channel.unary_unary(
'/wandb_internal.InternalService/RunStart',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunStartRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunStartResponse.FromString,
)
self.GetSummary = channel.unary_unary(
'/wandb_internal.InternalService/GetSummary',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryResponse.FromString,
)
self.SampledHistory = channel.unary_unary(
'/wandb_internal.InternalService/SampledHistory',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryResponse.FromString,
)
self.PollExit = channel.unary_unary(
'/wandb_internal.InternalService/PollExit',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PollExitRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PollExitResponse.FromString,
)
self.Shutdown = channel.unary_unary(
'/wandb_internal.InternalService/Shutdown',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ShutdownRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ShutdownResponse.FromString,
)
self.RunExit = channel.unary_unary(
'/wandb_internal.InternalService/RunExit',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunExitRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunExitResult.FromString,
)
self.RunPreempting = channel.unary_unary(
'/wandb_internal.InternalService/RunPreempting',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingResult.FromString,
)
self.Metric = channel.unary_unary(
'/wandb_internal.InternalService/Metric',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.MetricRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.MetricResult.FromString,
)
self.PartialLog = channel.unary_unary(
'/wandb_internal.InternalService/PartialLog',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryResponse.FromString,
)
self.Log = channel.unary_unary(
'/wandb_internal.InternalService/Log',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.HistoryRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.HistoryResult.FromString,
)
self.Summary = channel.unary_unary(
'/wandb_internal.InternalService/Summary',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.SummaryRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.SummaryResult.FromString,
)
self.Config = channel.unary_unary(
'/wandb_internal.InternalService/Config',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ConfigRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ConfigResult.FromString,
)
self.Files = channel.unary_unary(
'/wandb_internal.InternalService/Files',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.FilesRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.FilesResult.FromString,
)
self.Output = channel.unary_unary(
'/wandb_internal.InternalService/Output',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.OutputRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.OutputResult.FromString,
)
self.Telemetry = channel.unary_unary(
'/wandb_internal.InternalService/Telemetry',
request_serializer=wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryResult.FromString,
)
self.Alert = channel.unary_unary(
'/wandb_internal.InternalService/Alert',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.AlertRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.AlertResult.FromString,
)
self.Artifact = channel.unary_unary(
'/wandb_internal.InternalService/Artifact',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactRecord.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactResult.FromString,
)
self.ArtifactSend = channel.unary_unary(
'/wandb_internal.InternalService/ArtifactSend',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendResponse.FromString,
)
self.ArtifactPoll = channel.unary_unary(
'/wandb_internal.InternalService/ArtifactPoll',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollResponse.FromString,
)
self.CheckVersion = channel.unary_unary(
'/wandb_internal.InternalService/CheckVersion',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionResponse.FromString,
)
self.Pause = channel.unary_unary(
'/wandb_internal.InternalService/Pause',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PauseRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PauseResponse.FromString,
)
self.Resume = channel.unary_unary(
'/wandb_internal.InternalService/Resume',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ResumeRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ResumeResponse.FromString,
)
self.Status = channel.unary_unary(
'/wandb_internal.InternalService/Status',
request_serializer=wandb_dot_proto_dot_wandb__internal__pb2.StatusRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.StatusResponse.FromString,
)
self.ServerShutdown = channel.unary_unary(
'/wandb_internal.InternalService/ServerShutdown',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownResponse.FromString,
)
self.ServerStatus = channel.unary_unary(
'/wandb_internal.InternalService/ServerStatus',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerStatusRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerStatusResponse.FromString,
)
self.ServerInformInit = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformInit',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitResponse.FromString,
)
self.ServerInformStart = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformStart',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartResponse.FromString,
)
self.ServerInformFinish = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformFinish',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishResponse.FromString,
)
self.ServerInformAttach = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformAttach',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachResponse.FromString,
)
self.ServerInformDetach = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformDetach',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachResponse.FromString,
)
self.ServerInformTeardown = channel.unary_unary(
'/wandb_internal.InternalService/ServerInformTeardown',
request_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownRequest.SerializeToString,
response_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownResponse.FromString,
)
class InternalServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def RunUpdate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Attach(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TBSend(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunStart(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSummary(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SampledHistory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PollExit(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Shutdown(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunExit(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunPreempting(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Metric(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PartialLog(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Log(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Summary(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Config(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Files(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Output(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Telemetry(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Alert(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Artifact(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ArtifactSend(self, request, context):
"""rpc messages for async operations: Send, Poll, Cancel, Release
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ArtifactPoll(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckVersion(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Pause(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Resume(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerShutdown(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerStatus(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformInit(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformStart(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformFinish(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformAttach(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformDetach(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerInformTeardown(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InternalServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'RunUpdate': grpc.unary_unary_rpc_method_handler(
servicer.RunUpdate,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunUpdateResult.SerializeToString,
),
'Attach': grpc.unary_unary_rpc_method_handler(
servicer.Attach,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.AttachRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.AttachResponse.SerializeToString,
),
'TBSend': grpc.unary_unary_rpc_method_handler(
servicer.TBSend,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.TBRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.TBResult.SerializeToString,
),
'RunStart': grpc.unary_unary_rpc_method_handler(
servicer.RunStart,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunStartRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunStartResponse.SerializeToString,
),
'GetSummary': grpc.unary_unary_rpc_method_handler(
servicer.GetSummary,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryResponse.SerializeToString,
),
'SampledHistory': grpc.unary_unary_rpc_method_handler(
servicer.SampledHistory,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryResponse.SerializeToString,
),
'PollExit': grpc.unary_unary_rpc_method_handler(
servicer.PollExit,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PollExitRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PollExitResponse.SerializeToString,
),
'Shutdown': grpc.unary_unary_rpc_method_handler(
servicer.Shutdown,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ShutdownRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ShutdownResponse.SerializeToString,
),
'RunExit': grpc.unary_unary_rpc_method_handler(
servicer.RunExit,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunExitRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunExitResult.SerializeToString,
),
'RunPreempting': grpc.unary_unary_rpc_method_handler(
servicer.RunPreempting,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingResult.SerializeToString,
),
'Metric': grpc.unary_unary_rpc_method_handler(
servicer.Metric,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.MetricRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.MetricResult.SerializeToString,
),
'PartialLog': grpc.unary_unary_rpc_method_handler(
servicer.PartialLog,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryResponse.SerializeToString,
),
'Log': grpc.unary_unary_rpc_method_handler(
servicer.Log,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.HistoryRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.HistoryResult.SerializeToString,
),
'Summary': grpc.unary_unary_rpc_method_handler(
servicer.Summary,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.SummaryRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.SummaryResult.SerializeToString,
),
'Config': grpc.unary_unary_rpc_method_handler(
servicer.Config,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ConfigRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ConfigResult.SerializeToString,
),
'Files': grpc.unary_unary_rpc_method_handler(
servicer.Files,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.FilesRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.FilesResult.SerializeToString,
),
'Output': grpc.unary_unary_rpc_method_handler(
servicer.Output,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.OutputRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.OutputResult.SerializeToString,
),
'Telemetry': grpc.unary_unary_rpc_method_handler(
servicer.Telemetry,
request_deserializer=wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryResult.SerializeToString,
),
'Alert': grpc.unary_unary_rpc_method_handler(
servicer.Alert,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.AlertRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.AlertResult.SerializeToString,
),
'Artifact': grpc.unary_unary_rpc_method_handler(
servicer.Artifact,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactRecord.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactResult.SerializeToString,
),
'ArtifactSend': grpc.unary_unary_rpc_method_handler(
servicer.ArtifactSend,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendResponse.SerializeToString,
),
'ArtifactPoll': grpc.unary_unary_rpc_method_handler(
servicer.ArtifactPoll,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollResponse.SerializeToString,
),
'CheckVersion': grpc.unary_unary_rpc_method_handler(
servicer.CheckVersion,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionResponse.SerializeToString,
),
'Pause': grpc.unary_unary_rpc_method_handler(
servicer.Pause,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.PauseRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.PauseResponse.SerializeToString,
),
'Resume': grpc.unary_unary_rpc_method_handler(
servicer.Resume,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.ResumeRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.ResumeResponse.SerializeToString,
),
'Status': grpc.unary_unary_rpc_method_handler(
servicer.Status,
request_deserializer=wandb_dot_proto_dot_wandb__internal__pb2.StatusRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__internal__pb2.StatusResponse.SerializeToString,
),
'ServerShutdown': grpc.unary_unary_rpc_method_handler(
servicer.ServerShutdown,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownResponse.SerializeToString,
),
'ServerStatus': grpc.unary_unary_rpc_method_handler(
servicer.ServerStatus,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerStatusRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerStatusResponse.SerializeToString,
),
'ServerInformInit': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformInit,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitResponse.SerializeToString,
),
'ServerInformStart': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformStart,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartResponse.SerializeToString,
),
'ServerInformFinish': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformFinish,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishResponse.SerializeToString,
),
'ServerInformAttach': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformAttach,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachResponse.SerializeToString,
),
'ServerInformDetach': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformDetach,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachResponse.SerializeToString,
),
'ServerInformTeardown': grpc.unary_unary_rpc_method_handler(
servicer.ServerInformTeardown,
request_deserializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownRequest.FromString,
response_serializer=wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'wandb_internal.InternalService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class InternalService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def RunUpdate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/RunUpdate',
wandb_dot_proto_dot_wandb__internal__pb2.RunRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.RunUpdateResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Attach(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Attach',
wandb_dot_proto_dot_wandb__internal__pb2.AttachRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.AttachResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TBSend(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/TBSend',
wandb_dot_proto_dot_wandb__internal__pb2.TBRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.TBResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunStart(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/RunStart',
wandb_dot_proto_dot_wandb__internal__pb2.RunStartRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.RunStartResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetSummary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/GetSummary',
wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.GetSummaryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SampledHistory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/SampledHistory',
wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.SampledHistoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PollExit(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/PollExit',
wandb_dot_proto_dot_wandb__internal__pb2.PollExitRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.PollExitResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Shutdown(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Shutdown',
wandb_dot_proto_dot_wandb__internal__pb2.ShutdownRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ShutdownResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunExit(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/RunExit',
wandb_dot_proto_dot_wandb__internal__pb2.RunExitRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.RunExitResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunPreempting(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/RunPreempting',
wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.RunPreemptingResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Metric(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Metric',
wandb_dot_proto_dot_wandb__internal__pb2.MetricRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.MetricResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PartialLog(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/PartialLog',
wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.PartialHistoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Log(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Log',
wandb_dot_proto_dot_wandb__internal__pb2.HistoryRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.HistoryResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Summary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Summary',
wandb_dot_proto_dot_wandb__internal__pb2.SummaryRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.SummaryResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Config(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Config',
wandb_dot_proto_dot_wandb__internal__pb2.ConfigRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ConfigResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Files(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Files',
wandb_dot_proto_dot_wandb__internal__pb2.FilesRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.FilesResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Output(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Output',
wandb_dot_proto_dot_wandb__internal__pb2.OutputRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.OutputResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Telemetry(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Telemetry',
wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryRecord.SerializeToString,
wandb_dot_proto_dot_wandb__telemetry__pb2.TelemetryResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Alert(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Alert',
wandb_dot_proto_dot_wandb__internal__pb2.AlertRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.AlertResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Artifact(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Artifact',
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactRecord.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ArtifactSend(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ArtifactSend',
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactSendResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ArtifactPoll(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ArtifactPoll',
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ArtifactPollResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CheckVersion(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/CheckVersion',
wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.CheckVersionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Pause(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Pause',
wandb_dot_proto_dot_wandb__internal__pb2.PauseRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.PauseResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Resume(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Resume',
wandb_dot_proto_dot_wandb__internal__pb2.ResumeRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.ResumeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Status(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/Status',
wandb_dot_proto_dot_wandb__internal__pb2.StatusRequest.SerializeToString,
wandb_dot_proto_dot_wandb__internal__pb2.StatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerShutdown(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerShutdown',
wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerShutdownResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerStatus',
wandb_dot_proto_dot_wandb__server__pb2.ServerStatusRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerStatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformInit(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformInit',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformInitResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformStart(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformStart',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformStartResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformFinish(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformFinish',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformFinishResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformAttach(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformAttach',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformAttachResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformDetach(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformDetach',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformDetachResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerInformTeardown(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/wandb_internal.InternalService/ServerInformTeardown',
wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownRequest.SerializeToString,
wandb_dot_proto_dot_wandb__server__pb2.ServerInformTeardownResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
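# Hedged usage sketch (not part of the generated module): how the handlers and
# helper above are typically wired together. It assumes the usual grpc-codegen
# names -- an `InternalServiceStub` client class generated earlier in this
# module, the message classes from `wandb.proto.wandb_internal_pb2`, and a
# hypothetical local port -- and is defined but never called here.
def _example_serve_and_ping(port=50051):
    from concurrent import futures
    import grpc
    from wandb.proto import wandb_internal_pb2

    class _Servicer(InternalServiceServicer):
        # Override only what the example needs; every other RPC keeps the
        # UNIMPLEMENTED default inherited from InternalServiceServicer.
        def Status(self, request, context):
            return wandb_internal_pb2.StatusResponse()

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_InternalServiceServicer_to_server(_Servicer(), server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    with grpc.insecure_channel('localhost:%d' % port) as channel:
        stub = InternalServiceStub(channel)
        stub.Status(wandb_internal_pb2.StatusRequest(), timeout=5)
    server.stop(grace=None)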
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import, print_function
import os
import struct
if os.environ.get('USE_TWISTED', False):
from twisted.trial import unittest
from twisted.internet.address import IPv4Address
from twisted.internet.task import Clock
from six import PY3
from autobahn.twisted.websocket import WebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketServerFactory
from autobahn.twisted.websocket import WebSocketClientProtocol
from autobahn.twisted.websocket import WebSocketClientFactory
from mock import MagicMock, patch
from txaio.testutil import replace_loop
from base64 import b64decode
@patch('base64.b64encode')
def create_client_frame(b64patch, **kwargs):
"""
Somewhat hacky; it might be better to refactor the Protocol to expose a
frame-encoder method. This makes a throwaway protocol encode a frame for
us, captures the .sendData call, and returns the data that would have
gone out. Accepts all the kwargs that
WebSocketClientProtocol.sendFrame() accepts.
"""
# only real way to inject a "known" secret-key for the headers
# to line up... :/
b64patch.return_value = b'QIatSt9QkZPyS4QQfdufO8TgkL0='
factory = WebSocketClientFactory(protocols=['wamp.2.json'])
factory.protocol = WebSocketClientProtocol
factory.doStart()
proto = factory.buildProtocol(IPv4Address('TCP', '127.0.0.9', 65534))
proto.transport = MagicMock()
proto.connectionMade()
proto.data = mock_handshake_server
proto.processHandshake()
data = []
def collect(d, *args):
data.append(d)
proto.sendData = collect
proto.sendFrame(**kwargs)
return b''.join(data)
# beware the evils of line-endings...
mock_handshake_client = b'GET / HTTP/1.1\r\nUser-Agent: AutobahnPython/0.10.2\r\nHost: localhost:80\r\nUpgrade: WebSocket\r\nConnection: Upgrade\r\nPragma: no-cache\r\nCache-Control: no-cache\r\nSec-WebSocket-Key: 6Jid6RgXpH0RVegaNSs/4g==\r\nSec-WebSocket-Protocol: wamp.2.json\r\nSec-WebSocket-Version: 13\r\n\r\n'
mock_handshake_server = b'HTTP/1.1 101 Switching Protocols\r\nServer: AutobahnPython/0.10.2\r\nX-Powered-By: AutobahnPython/0.10.2\r\nUpgrade: WebSocket\r\nConnection: Upgrade\r\nSec-WebSocket-Protocol: wamp.2.json\r\nSec-WebSocket-Accept: QIatSt9QkZPyS4QQfdufO8TgkL0=\r\n\r\n\x81~\x02\x19[1,"crossbar",{"roles":{"subscriber":{"features":{"publisher_identification":true,"pattern_based_subscription":true,"subscription_revocation":true}},"publisher":{"features":{"publisher_identification":true,"publisher_exclusion":true,"subscriber_blackwhite_listing":true}},"caller":{"features":{"caller_identification":true,"progressive_call_results":true}},"callee":{"features":{"progressive_call_results":true,"pattern_based_registration":true,"registration_revocation":true,"shared_registration":true,"caller_identification":true}}}}]\x18'
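# Hedged helper sketch (not part of the original tests): decode the FIN flag
# and opcode from the first byte of a WebSocket frame, the same masking the
# auto-ping tests below do inline. Per RFC 6455 the first byte carries
# FIN | RSV1-3 | a 4-bit opcode (9 == ping, 10 == pong).
def _frame_fin_and_opcode(data):
    (first,) = struct.unpack("B", data[:1])
    return bool(first & 0x80), first & 0x0f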
class TestClient(unittest.TestCase):
def setUp(self):
self.factory = WebSocketClientFactory(protocols=['wamp.2.json'])
self.factory.protocol = WebSocketClientProtocol
self.factory.doStart()
self.proto = self.factory.buildProtocol(IPv4Address('TCP', '127.0.0.1', 65534))
self.transport = MagicMock()
self.proto.transport = self.transport
self.proto.connectionMade()
def tearDown(self):
if self.proto.openHandshakeTimeoutCall:
self.proto.openHandshakeTimeoutCall.cancel()
self.factory.doStop()
# not really necessary, but ...
del self.factory
del self.proto
def test_missing_reason_raw(self):
# we want to hit the "STATE_OPEN" case, so pretend we're there
self.proto.echoCloseCodeReason = True
self.proto.state = self.proto.STATE_OPEN
self.proto.websocket_version = 1
self.proto.sendCloseFrame = MagicMock()
self.proto.onCloseFrame(1000, None)
def test_unclean_timeout_client(self):
"""
make a delayed call to drop the connection (client-side)
"""
if False:
self.proto.factory._log = print
# get to STATE_OPEN
self.proto.websocket_key = b64decode('6Jid6RgXpH0RVegaNSs/4g==')
self.proto.data = mock_handshake_server
self.proto.processHandshake()
self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_OPEN)
self.assertTrue(self.proto.serverConnectionDropTimeout > 0)
with replace_loop(Clock()) as reactor:
# now 'do the test' and transition to CLOSING
self.proto.sendCloseFrame()
self.proto.onCloseFrame(1000, b"raw reason")
# check we scheduled a call
self.assertEqual(len(reactor.calls), 1)
self.assertEqual(reactor.calls[0].func, self.proto.onServerConnectionDropTimeout)
self.assertEqual(reactor.calls[0].getTime(), self.proto.serverConnectionDropTimeout)
# now, advance the clock past the call (and thereby
# execute it)
reactor.advance(self.proto.closeHandshakeTimeout + 1)
# we should have called abortConnection
self.assertEqual("call.abortConnection()", str(self.proto.transport.method_calls[-1]))
self.assertTrue(self.proto.transport.abortConnection.called)
# ...too "internal" for an assert?
self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
class TestPing(unittest.TestCase):
def setUp(self):
self.factory = WebSocketServerFactory(protocols=['wamp.2.json'])
self.factory.protocol = WebSocketServerProtocol
self.factory.doStart()
self.proto = self.factory.buildProtocol(IPv4Address('TCP', '127.0.0.1', 65534))
self.transport = MagicMock()
self.proto.transport = self.transport
self.proto.connectionMade()
def tearDown(self):
self.factory.doStop()
# not really necessary, but ...
del self.factory
del self.proto
def test_unclean_timeout(self):
"""
make a delayed call to drop the connection
"""
# first we have to drive the protocol to STATE_CLOSING
# ... which we achieve by sendCloseFrame after we're in
# STATE_OPEN
# XXX double-check this is the correct code-path to get here
# "normally"?
# get to STATE_OPEN
self.proto.data = mock_handshake_client
self.proto.processHandshake()
self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN)
with replace_loop(Clock()) as reactor:
# now 'do the test' and transition to CLOSING
self.proto.sendCloseFrame()
# check we scheduled a call
self.assertEqual(len(reactor.calls), 1)
# now, advance the clock past the call (and thereby
# execute it)
reactor.advance(self.proto.closeHandshakeTimeout + 1)
# we should have called abortConnection
self.assertEqual("call.abortConnection()", str(self.proto.transport.method_calls[-1]))
self.assertTrue(self.proto.transport.abortConnection.called)
# ...too "internal" for an assert?
self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
def test_auto_pingpong_timeout(self):
"""
autoping and autoping-timeout timing
"""
# options are evaluated in succeedHandshake, called below
self.proto.autoPingInterval = 5
self.proto.autoPingTimeout = 2
with replace_loop(Clock()) as reactor:
# get to STATE_OPEN
self.proto.data = mock_handshake_client
self.proto.processHandshake()
self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN)
# we should have scheduled an autoPing
self.assertEqual(1, len(reactor.calls))
# advance past first auto-ping timeout
reactor.advance(5)
# first element from args tuple from transport.write()
# call is our data
self.assertTrue(self.transport.write.called)
data = self.transport.write.call_args[0][0]
if PY3:
_data = bytes([data[0]])
else:
_data = data[0]
# the first byte is FIN | RSV1-3 | a 4-bit opcode; masking off the FIN
# bit (0x80) leaves the opcode, since the RSV bits are zero here.
(opcode,) = struct.unpack("B", _data)
opcode = opcode & (~0x80)
# ... and should be "9" for ping
self.assertEqual(9, opcode)
# Because we have autoPingTimeout there should be
# another delayed call created now
self.assertEqual(1, len(reactor.calls))
self.assertNotEqual(self.proto.state, self.proto.STATE_CLOSED)
# ...which we'll now cause to trigger, aborting the connection
reactor.advance(3)
self.assertEqual(self.proto.state, self.proto.STATE_CLOSED)
def test_auto_ping_got_pong(self):
"""
auto-ping with correct reply cancels timeout
"""
# options are evaluated in succeedHandshake, called below
self.proto.autoPingInterval = 5
self.proto.autoPingTimeout = 2
with replace_loop(Clock()) as reactor:
# get to STATE_OPEN
self.proto.data = mock_handshake_client
self.proto.processHandshake()
self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN)
# we should have scheduled an autoPing
self.assertEqual(1, len(reactor.calls))
# advance past first auto-ping timeout
reactor.advance(5)
# should have an auto-ping timeout scheduled, and we
# save it for later (to check it got cancelled)
self.assertEqual(1, len(reactor.calls))
timeout_call = reactor.calls[0]
# elsewhere we check that we actually send an opcode-9
# message; now we just blindly inject our own reply
# with a PONG frame
frame = create_client_frame(opcode=10, payload=self.proto.autoPingPending)
self.proto.data = frame
# really needed twice; does header first, then rest
self.proto.processData()
self.proto.processData()
# which should have cancelled the call
self.assertTrue(timeout_call.cancelled)
|
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists; if it does not, a LookupError
# exception will be raised.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to iterate through each feature in the Layer; simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed;
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
                # special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
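# Illustrative sketch (not part of Django): typical LayerMapping usage driven by
# the save() keyword arguments documented above. The `WorldBorder` model, the
# `world_mapping` dictionary and the shapefile path are hypothetical stand-ins
# for a real GeoDjango application; this helper is never called from this module.
def _example_layer_mapping():
    from myapp.models import WorldBorder  # hypothetical model with name/mpoly fields
    world_mapping = {
        'name': 'NAME',           # model field -> OGR field name in the shapefile
        'mpoly': 'MULTIPOLYGON',  # geometry field -> OGR geometry type
    }
    lm = LayerMapping(WorldBorder, '/path/to/TM_WORLD_BORDERS.shp', world_mapping,
                      transform=False, encoding='iso-8859-1')
    # Commit every 1000 features and print progress, per the save() docstring above.
    lm.save(strict=True, verbose=True, step=1000, progress=True)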
|
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
    # duty must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
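# Illustrative sketch (not part of SciPy): using the 'cutoff' mode documented
# above to size the time axis so that the pulse envelope has fallen below tpr dB
# at the edges, then evaluating the in-phase component and envelope over it.
if __name__ == '__main__':
    tc = gausspulse('cutoff', fc=1000, bw=0.5, bwr=-6, tpr=-60)
    t_demo = np.linspace(-tc, tc, 2001)
    yI_demo, yenv_demo = gausspulse(t_demo, fc=1000, bw=0.5, retenv=True)
    print('cutoff time: %g; edge envelope: %g; edge signal: %g'
          % (tc, yenv_demo[0], yI_demo[0]))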
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
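# Illustrative sketch (not part of SciPy): evaluating the four sweep methods
# described in the chirp() docstring over the same time base. All four sweeps
# start at 6 Hz and end at 1 Hz after 10 seconds.
if __name__ == '__main__':
    t_demo = np.linspace(0, 10, 5001)
    for kind in ('linear', 'quadratic', 'logarithmic', 'hyperbolic'):
        y_demo = chirp(t_demo, f0=6, t1=10, f1=1, method=kind)
        print('%-12s first samples: %s' % (kind, y_demo[:3]))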
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
    Calculate the phase used by chirp to generate its output.
    See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
        Phase offset, in degrees. Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
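# Illustrative sketch (not part of SciPy): a sweep whose instantaneous frequency
# follows the cubic polynomial f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2, given
# in the numpy.poly1d form accepted by sweep_poly() above.
if __name__ == '__main__':
    p_demo = np.poly1d([0.025, -0.36, 1.25, 2.0])
    t_demo = np.linspace(0, 10, 5001)
    y_demo = sweep_poly(t_demo, p_demo)
    print('f(0) = %g Hz, f(10) = %g Hz' % (p_demo(0.0), p_demo(10.0)))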
|
|
# -*- coding: utf-8 -*-
'''
Copyright (C) 2008 10gen Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import with_statement
import sys
import re
from django.utils import safestring
from django.conf import settings
settings.configure()
#patch Template to save the damn source
from django.template import Template
class HackTemplate(Template):
def __init__(self, template_string, **kwargs):
self.template_string = template_string
Template.__init__(self, template_string, **kwargs)
import django.template
django.template.Template = HackTemplate
#patch datetime.now
import datetime
olddatetime = datetime.datetime
class HackDatetime(datetime.datetime):
@classmethod
def now(cls, *args, **kw):
wrapped = HackDatetime.wrap(olddatetime.now(*args, **kw))
wrapped.delta = datetime.timedelta()
return wrapped
def __add__(self, delta):
wrapper = HackDatetime.wrap(olddatetime.__add__(self, delta))
wrapper.delta = self.delta + delta
return wrapper
def __sub__(self, delta):
wrapper = HackDatetime.wrap(olddatetime.__sub__(self, delta))
wrapper.delta = self.delta - delta
return wrapper
@classmethod
def wrap(cls, dt):
return HackDatetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo)
datetime.datetime = HackDatetime
#import django stuff
from django.utils.safestring import SafeData, EscapeString
from django.template import TemplateSyntaxError
from regressiontests.templates import tests
from regressiontests.templates import filters
exported_classes = (
tests.SomeException,
tests.SomeOtherException,
tests.SomeClass,
tests.OtherClass,
tests.UTF8Class,
filters.SafeClass,
filters.UnsafeClass,
TemplateSyntaxError,
HackTemplate, # requires args
)
preamble = """
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
"""
def convert(py_tests):
#ignoring filter_tests
expected_invalid_str = 'INVALID'
buffer = preamble
buffer += "tests=[\n"
skip_count = 0
for name, vals in py_tests:
if isinstance(vals[2], tuple):
normal_string_result = vals[2][0]
invalid_string_result = vals[2][1]
if '%s' in invalid_string_result:
expected_invalid_str = 'INVALID %s'
invalid_string_result = invalid_string_result % vals[2][2]
else:
normal_string_result = vals[2]
invalid_string_result = vals[2]
#ignoring LANGUAGE_CORE for now
buffer += serialize_test(name, vals) + ",\n"
return buffer + "\n];"
def serialize_test(name, a_test):
#special case dates
if name == 'now01':
results = '%s + " " + %s + " " + %s' % ("((new Date()).getDate())", "((new Date()).getMonth() + 1)", "((new Date()).getYear())")
else:
results = serialize(a_test[2], True)
#rename var to var1
content = a_test[0]
return ' { name: %s, content: %s, model: %s, results: %s }' % ( serialize(name), serialize(content), serialize(a_test[1]), results)
def serialize(m, is_result=False):
if m is None:
return "null"
elif isinstance(m, (tuple, list)):
return "[ %s ]" % ", ".join( ["%s" % serialize(item) for item in m] )
elif isinstance(m, dict):
return '{ %s }' % ", ".join( ['%s: %s' % (serialize(key), serialize(value)) for key, value in m.items()] )
elif isinstance(m, SafeData):
return 'djang10.mark_safe(%s)' % serialize(str(m))
elif isinstance(m, HackDatetime):
secs = m.delta.days * 3600 * 24
secs += m.delta.seconds
        return "from_now(%d)" % secs
elif isinstance(m, basestring):
if(is_result):
encoding = "unicode_escape" if isinstance(m, unicode) else "string_escape"
m = m.encode(encoding).replace('"', '\\"')
else:
m = escape_str(m)
m = '"%s"' % m
if(isinstance(m, unicode)):
m = m.encode('utf-8')
return m
elif isinstance(m , (int, long) ):
return "%d" % m
elif isinstance(m, float):
return "%f" % m
elif isinstance(m, type):
if(m in exported_classes):
return m.__name__
raise Exception("can't serialize the type: %s" % m)
elif isinstance(m, object):
if(isinstance(m, HackTemplate)):
return 'new %s(%s)' % (m.__class__.__name__, serialize(m.template_string))
if(m.__class__ in exported_classes):
return "new %s()" % m.__class__.__name__
else:
raise Exception("Can't serialize the obj: %s" % m.__class__)
else:
raise Exception("can't serialize the model: %s" % m)
def escape_str(str):
def replace(match):
return ESCAPE_DCT[match.group(0)]
return ESCAPE.sub(replace, str)
''' String escaping'''
ESCAPE = re.compile(r'[\\"\b\f\n\r\t]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
''' Main '''
#do tag tests
print("converting tag tests")
items = tests.Templates("test_templates").get_template_tests().items()
items.sort()
with open("tests.js", "w") as f:
result = convert(items)
f.writelines(result)
#do filter tests
print("\nconverting filter tests")
items = filters.get_filter_tests().items()
items.sort()
with open("filter_tests.js", "w") as f:
result = convert(items)
f.writelines(result)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
# Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'DriverLoadFailure',
'InvalidTransportURL',
'Transport',
'TransportHost',
'TransportURL',
'get_transport',
'set_transport_defaults',
]
import urllib
import urlparse
from oslo.config import cfg
from stevedore import driver
from oslo.messaging import exceptions
_transport_opts = [
cfg.StrOpt('transport_url',
default=None,
help='A URL representing the messaging driver to use and its '
'full configuration. If not set, we fall back to the '
'rpc_backend option and driver specific configuration.'),
cfg.StrOpt('rpc_backend',
default='kombu',
help='The messaging driver to use, defaults to kombu. Other '
'drivers include qpid and zmq.'),
cfg.StrOpt('control_exchange',
default='openstack',
help='The default exchange under which topics are scoped. May '
'be overridden by an exchange name specified in the '
'transport_url option.'),
]
def set_transport_defaults(control_exchange):
"""Set defaults for messaging transport configuration options.
:param control_exchange: the default exchange under which topics are scoped
:type control_exchange: str
"""
cfg.set_defaults(_transport_opts,
control_exchange=control_exchange)
class Transport(object):
"""A messaging transport.
This is a mostly opaque handle for an underlying messaging transport
driver.
It has a single 'conf' property which is the cfg.ConfigOpts instance used
to construct the transport object.
"""
def __init__(self, driver):
self.conf = driver.conf
self._driver = driver
def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None):
if not target.topic:
raise exceptions.InvalidTarget('A topic is required to send',
target)
return self._driver.send(target, ctxt, message,
wait_for_reply=wait_for_reply,
timeout=timeout)
def _send_notification(self, target, ctxt, message, version):
if not target.topic:
raise exceptions.InvalidTarget('A topic is required to send',
target)
self._driver.send(target, ctxt, message, version)
def _listen(self, target):
if not (target.topic and target.server):
raise exceptions.InvalidTarget('A server\'s target must have '
'topic and server names specified',
target)
return self._driver.listen(target)
def cleanup(self):
"""Release all resources associated with this transport."""
self._driver.cleanup()
class InvalidTransportURL(exceptions.MessagingException):
"""Raised if transport URL is invalid."""
def __init__(self, url, msg):
super(InvalidTransportURL, self).__init__(msg)
self.url = url
class DriverLoadFailure(exceptions.MessagingException):
"""Raised if a transport driver can't be loaded."""
def __init__(self, driver, ex):
msg = 'Failed to load transport driver "%s": %s' % (driver, ex)
super(DriverLoadFailure, self).__init__(msg)
self.driver = driver
self.ex = ex
def get_transport(conf, url=None, allowed_remote_exmods=[]):
"""A factory method for Transport objects.
This method will construct a Transport object from transport configuration
gleaned from the user's configuration and, optionally, a transport URL.
If a transport URL is supplied as a parameter, any transport configuration
contained in it takes precedence. If no transport URL is supplied, but
there is a transport URL supplied in the user's configuration then that
URL will take the place of the url parameter. In both cases, any
configuration not supplied in the transport URL may be taken from
individual configuration parameters in the user's configuration.
An example transport URL might be::
rabbit://me:passwd@host:5672/virtual_host
and can either be passed as a string or a TransportURL object.
:param conf: the user configuration
:type conf: cfg.ConfigOpts
:param url: a transport URL
:type url: str or TransportURL
:param allowed_remote_exmods: a list of modules which a client using this
transport will deserialize remote exceptions
from
:type allowed_remote_exmods: list
"""
conf.register_opts(_transport_opts)
if not isinstance(url, TransportURL):
url = url or conf.transport_url
parsed = TransportURL.parse(conf, url)
if not parsed.transport:
raise InvalidTransportURL(url, 'No scheme specified in "%s"' % url)
url = parsed
kwargs = dict(default_exchange=conf.control_exchange,
allowed_remote_exmods=allowed_remote_exmods)
try:
mgr = driver.DriverManager('oslo.messaging.drivers',
url.transport,
invoke_on_load=True,
invoke_args=[conf, url],
invoke_kwds=kwargs)
except RuntimeError as ex:
raise DriverLoadFailure(url.transport, ex)
return Transport(mgr.driver)
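# Illustrative sketch (not part of oslo.messaging): how a service might obtain a
# transport via the factory above. The URL is hypothetical, the matching driver
# must be installed for DriverManager to resolve it, and this helper is never
# called from this module.
def _example_get_transport(conf):
    transport = get_transport(conf, url='rabbit://me:passwd@host:5672/virtual_host')
    # ... hand the transport to an RPC server/client or a notifier here ...
    transport.cleanup()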
class TransportHost(object):
"""A host element of a parsed transport URL."""
def __init__(self, hostname=None, port=None, username=None, password=None):
self.hostname = hostname
self.port = port
self.username = username
self.password = password
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not self == other
def __repr__(self):
attrs = []
for a in ['hostname', 'port', 'username', 'password']:
v = getattr(self, a)
if v:
attrs.append((a, repr(v)))
values = ', '.join(['%s=%s' % i for i in attrs])
return '<TransportHost ' + values + '>'
class TransportURL(object):
"""A parsed transport URL.
Transport URLs take the form::
transport://user:pass@host1:port[,hostN:portN]/virtual_host
    i.e. the scheme selects the transport driver, the netloc may list multiple
    hosts, and the path part is a "virtual host" partition path.
:param conf: a ConfigOpts instance
:type conf: oslo.config.cfg.ConfigOpts
:param transport: a transport name e.g. 'rabbit' or 'qpid'
:type transport: str
:param virtual_host: a virtual host path e.g. '/'
:type virtual_host: str
:param hosts: a list of TransportHost objects
:type hosts: list
"""
def __init__(self, conf, transport=None, virtual_host=None, hosts=None):
self.conf = conf
self.conf.register_opts(_transport_opts)
self._transport = transport
self._virtual_host = virtual_host
self._hosts = hosts
if self._hosts is None:
self._hosts = []
@property
def transport(self):
if self._transport is None:
return self.conf.rpc_backend
else:
return self._transport
@transport.setter
def transport(self, value):
self._transport = value
@property
def virtual_host(self):
return self._virtual_host
@virtual_host.setter
def virtual_host(self, value):
self._virtual_host = value
@property
def hosts(self):
return self._hosts
def __eq__(self, other):
return (self.transport == other.transport and
self.virtual_host == other.virtual_host and
self.hosts == other.hosts)
def __ne__(self, other):
return not self == other
def __repr__(self):
attrs = []
for a in ['transport', 'virtual_host', 'hosts']:
v = getattr(self, a)
if v:
attrs.append((a, repr(v)))
values = ', '.join(['%s=%s' % i for i in attrs])
return '<TransportURL ' + values + '>'
def __str__(self):
netlocs = []
for host in self.hosts:
username = host.username
password = host.password
hostname = host.hostname
port = host.port
# Starting place for the network location
netloc = ''
# Build the username and password portion of the transport URL
if username is not None or password is not None:
if username is not None:
netloc += urllib.quote(username, '')
if password is not None:
netloc += ':%s' % urllib.quote(password, '')
netloc += '@'
# Build the network location portion of the transport URL
if hostname:
if ':' in hostname:
netloc += '[%s]' % hostname
else:
netloc += hostname
if port is not None:
netloc += ':%d' % port
netlocs.append(netloc)
# Assemble the transport URL
url = '%s://%s/' % (self.transport, ','.join(netlocs))
if self.virtual_host:
url += urllib.quote(self.virtual_host)
return url
@classmethod
    def parse(cls, conf, url):
        """Parse a URL.
Assuming a URL takes the form of:
transport://user:pass@host1:port[,hostN:portN]/virtual_host
then parse the URL and return a TransportURL object.
        The netloc is parsed in the following sequence:
        * It is first split on ',' in order to support multiple hosts.
        * The last parsed username and password will be propagated to the
          remaining hosts specified:
user:passwd@host1:port1,host2:port2
[
{"username": "user", "password": "passwd", "host": "host1:port1"},
{"username": "user", "password": "passwd", "host": "host2:port2"}
]
* In order to avoid the above propagation, it is possible to alter the
order in which the hosts are specified or specify a set of fake
credentials using ",:@host2:port2"
user:passwd@host1:port1,:@host2:port2
[
{"username": "user", "password": "passwd", "host": "host1:port1"},
{"username": "", "password": "", "host": "host2:port2"}
]
:param conf: a ConfigOpts instance
:type conf: oslo.config.cfg.ConfigOpts
:param url: The URL to parse
:type url: str
:returns: A TransportURL
"""
if not url:
return cls(conf)
# FIXME(flaper87): Not PY3K compliant
if not isinstance(url, basestring):
raise InvalidTransportURL(url, 'Wrong URL type')
url = urlparse.urlparse(url)
# Make sure there's not a query string; that could identify
# requirements we can't comply with (e.g., ssl), so reject it if
# it's present
if '?' in url.path or url.query:
raise InvalidTransportURL(url.geturl(),
"Cannot comply with query string in "
"transport URL")
virtual_host = None
if url.path.startswith('/'):
virtual_host = url.path[1:]
hosts = []
username = password = ''
for host in url.netloc.split(','):
if not host:
continue
hostname = host
username = password = port = None
if '@' in host:
username, hostname = host.split('@', 1)
if ':' in username:
username, password = username.split(':', 1)
if not hostname:
hostname = None
elif hostname.startswith('['):
# Find the closing ']' and extract the hostname
host_end = hostname.find(']')
if host_end < 0:
# NOTE(Vek): Identical to what Python 2.7's
# urlparse.urlparse() raises in this case
raise ValueError("Invalid IPv6 URL")
port_text = hostname[host_end:]
hostname = hostname[1:host_end]
# Now we need the port; this is compliant with how urlparse
# parses the port data
port = None
if ':' in port_text:
port = int(port_text.split(':', 1)[1])
elif ':' in hostname:
hostname, port = hostname.split(':', 1)
port = int(port)
hosts.append(TransportHost(hostname=hostname,
port=port,
username=username,
password=password))
return cls(conf, url.scheme, virtual_host, hosts)
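# Illustrative sketch (not part of oslo.messaging): parsing a multi-host URL of
# the form documented in parse() above and re-serializing it. A throwaway
# ConfigOpts instance stands in for a real service configuration; this helper is
# never called from this module.
def _example_parse_url():
    conf = cfg.ConfigOpts()
    url = TransportURL.parse(conf, 'rabbit://me:passwd@host1:5672,host2:5672/virtual_host')
    print('transport=%s virtual_host=%s hosts=%d' %
          (url.transport, url.virtual_host, len(url.hosts)))
    print(str(url))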
|
|
#!/usr/local/bin/python
# tabview.py -- View a tab-delimited file in a spreadsheet-like display.
# Contributed by A.M. Kuchling <[email protected]>
#
# The tab-delimited file is displayed on screen. The highlighted
# position is shown in the top-left corner of the screen; below it are
# shown the contents of that cell.
#
# Movement keys are:
# Cursor keys: Move the highlighted cell, scrolling if required.
# Q or q : Quit
# TAB : Page right a screen
# Home : Move to the start of this line
# End : Move to the end of this line
# PgUp/PgDn : Move a page up or down
# Insert : Memorize this position
# Delete : Return to memorized position (if any)
#
# TODO :
# A 'G' for Goto: enter a cell like AA260 and move there
# A key to re-read the tab-delimited file
#
# Possible projects:
# Allow editing of cells, and then saving the modified data
# Add formula evaluation, and you've got a simple spreadsheet
# program. (Actually, you should allow displaying both via curses and
# via a Tk widget.)
#
import curses, re, string
def yx2str(y,x):
"Convert a coordinate pair like 1,26 to AA2"
if x<26: s=chr(65+x)
else:
x=x-26
s=chr(65+ (x/26) ) + chr(65+ (x%26) )
s=s+str(y+1)
return s
coord_pat = re.compile('^(?P<x>[a-zA-Z]{1,2})(?P<y>\d+)$')
def str2yx(s):
"Convert a string like A1 to a coordinate pair like 0,0"
match = coord_pat.match(s)
if not match: return None
y,x = match.group('y', 'x')
x = string.upper(x)
if len(x)==1: x=ord(x)-65
else:
x= (ord(x[0])-65)*26 + ord(x[1])-65 + 26
return string.atoi(y)-1, x
assert yx2str(0,0) == 'A1'
assert yx2str(1,26) == 'AA2'
assert str2yx('AA2') == (1,26)
assert str2yx('B2') == (1,1)
class TabFile:
def __init__(self, scr, filename, column_width=20):
self.scr=scr ; self.filename = filename
self.column_width = column_width
f=open(filename, 'r')
self.data = []
while (1):
L=f.readline()
if L=="": break
self.data.append( string.split(L, '\t') )
# if len(self.data)>6: break # XXX
self.x, self.y = 0,0
self.win_x, self.win_y = 0,0
self.max_y, self.max_x = self.scr.getmaxyx()
self.num_columns = int(self.max_x/self.column_width)
self.scr.clear()
self.display()
def move_to_end(self):
"""Move the highlighted location to the end of the current line."""
# This is a method because I didn't want to have the code to
# handle the End key be aware of the internals of the TabFile object.
yp=self.y+self.win_y ; xp=self.x+self.win_x
if len(self.data)<=yp: end=0
else: end=len(self.data[yp])-1
# If the end column is on-screen, just change the
# .x value appropriately.
if self.win_x <= end < self.win_x + self.num_columns:
self.x = end - self.win_x
else:
if end<self.num_columns:
self.win_x = 0 ; self.x = end
else:
self.x = self.num_columns-1
self.win_x = end-self.x
def display(self):
"""Refresh the current display"""
self.scr.addstr(0,0,
yx2str(self.y + self.win_y, self.x+self.win_x)+' ',
curses.A_REVERSE)
for y in range(0, self.max_y-3):
self.scr.move(y+2,0) ; self.scr.clrtoeol()
for x in range(0, int(self.max_x / self.column_width) ):
self.scr.attrset(curses.A_NORMAL)
yp=y+self.win_y ; xp=x+self.win_x
if len(self.data)<=yp: s=""
elif len(self.data[yp])<=xp: s=""
else: s=self.data[yp][xp]
s = string.ljust(s, 15)[0:15]
if x==self.x and y==self.y: self.scr.attrset(curses.A_STANDOUT)
self.scr.addstr(y+2, x*self.column_width, s)
yp=self.y+self.win_y ; xp=self.x+self.win_x
if len(self.data)<=yp: s=""
elif len(self.data[yp])<=xp: s=""
else: s=self.data[yp][xp]
self.scr.move(1,0) ; self.scr.clrtoeol()
self.scr.addstr(s[0:self.max_x])
self.scr.refresh()
def main(stdscr):
import string, curses, sys
if len(sys.argv)==1:
print 'Usage: tabview.py <filename>'
return
filename=sys.argv[1]
# Clear the screen and display the menu of keys
stdscr.clear()
file = TabFile(stdscr, filename)
# Main loop:
while (1):
stdscr.move(file.y+2, file.x*file.column_width) # Move the cursor
c=stdscr.getch() # Get a keystroke
if 0<c<256:
c=chr(c)
# Q or q exits
if c in 'Qq': break
# Tab pages one screen to the right
elif c=='\t':
file.win_x = file.win_x + file.num_columns
file.display()
else: pass # Ignore incorrect keys
# Cursor keys
        elif c==curses.KEY_UP:
if file.y == 0:
if file.win_y>0: file.win_y = file.win_y - 1
else: file.y=file.y-1
file.display()
        elif c==curses.KEY_DOWN:
if file.y < file.max_y-3 -1: file.y=file.y+1
else: file.win_y = file.win_y+1
file.display()
        elif c==curses.KEY_LEFT:
if file.x == 0:
if file.win_x>0: file.win_x = file.win_x - 1
else: file.x=file.x-1
file.display()
        elif c==curses.KEY_RIGHT:
if file.x < int(file.max_x/file.column_width)-1: file.x=file.x+1
else: file.win_x = file.win_x+1
file.display()
# Home key moves to the start of this line
        elif c==curses.KEY_HOME:
file.win_x = file.x = 0
file.display()
# End key moves to the end of this line
        elif c==curses.KEY_END:
file.move_to_end()
file.display()
# PageUp moves up a page
        elif c==curses.KEY_PPAGE:
file.win_y = file.win_y - (file.max_y - 2)
if file.win_y<0: file.win_y = 0
file.display()
# PageDn moves down a page
        elif c==curses.KEY_NPAGE:
file.win_y = file.win_y + (file.max_y - 2)
if file.win_y<0: file.win_y = 0
file.display()
# Insert memorizes the current position
        elif c==curses.KEY_IC:
file.save_y, file.save_x = file.y + file.win_y, file.x + file.win_x
# Delete restores a saved position
        elif c==curses.KEY_DC:
if hasattr(file, 'save_y'):
file.x = file.y = 0
file.win_y, file.win_x = file.save_y, file.save_x
file.display()
else:
stdscr.addstr(0,50, curses.keyname(c)+ ' pressed')
stdscr.refresh()
pass # Ignore incorrect keys
if __name__=='__main__':
import curses, traceback
try:
# Initialize curses
stdscr=curses.initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho() ; curses.cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
        # a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
main(stdscr) # Enter the main loop
# Set everything back to normal
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin() # Terminate curses
except:
# In the event of an error, restore the terminal
# to a sane state.
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin()
traceback.print_exc() # Print the exception
|
|
import sys, getopt, re, datetime
import xml.etree.ElementTree as etree
import yaml
from os import listdir, rename
from os.path import isdir, isfile, join, splitext, abspath, exists
# namespace for METS
NS = { 'METS' : 'http://www.loc.gov/METS/' }
RPT_LINES = []
HELP = '''\nSYNOPSIS
python3 reseqr.py -h -s -x -c <config> -p <project name> -b <batch name>
DESCRIPTION
-h help
-s write renaming script to batch directory
-x execute renaming of files
-c configuration file path and name, overrides default
-p project name identifier in configuration file, overrides default
-b batch directory name to be processed (required option)
'''
config = None #global access
def rpt(msg, quit = False, quiet = False):
RPT_LINES.append(msg)
if not quiet:
print(msg)
if quit:
if config is not None:
with open(config['report_path'], 'w') as rfile:
for line in RPT_LINES: rfile.write(line + '\n')
print(' -- Quitting Reseqr\n')
sys.exit(2)
def read_project_config(config_file, project):
''' config parse and check for required values '''
if config_file == None:
config_file = 'reseqr.config' # in current working directory
rpt(' Using default config file: ' + config_file)
try:
fyaml = open(config_file)
config_all = yaml.safe_load(fyaml)
except IOError:
rpt('Unable to open config file: ' + config_file, True)
else:
fyaml.close()
if project == None:
try:
project = config_all['default project']
except KeyError:
rpt('Unable to read default project name in configuration file. ', True)
# configuration values now available
if project not in config_all:
rpt('project "' + project + '" not listed in configuration file', True)
pconfig = config_all[project]
#project_name = config['project_name']
#project_path = config['project_path']
#mets_path = config['mets_path']
if 'project_name' not in pconfig:
rpt('Project name not available in config.', False)
pconfig['project_name'] = 'Missing project name'
    if 'project_path' not in pconfig:
        rpt('Project path not available in config.', True)
    rpt('\nProject: "{}" located at {}'.format(pconfig['project_name'], abspath(pconfig['project_path'])), False)
if 'mets_path' not in pconfig:
rpt('METS path not available in config.', True)
if 'extension' not in pconfig:
pconfig['extension'] = '.jp2'
rpt('Extension not in config, using .jp2')
if 'strict_mode' not in pconfig:
pconfig['strict_mode'] = True
global config
config = pconfig
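# Illustrative sketch (not part of this script): the configuration shape implied
# by read_project_config() above. Key names match the lookups made elsewhere in
# this module; the project name, paths and prefixes are hypothetical.
_EXAMPLE_CONFIG_YAML = '''
default project: darwin
darwin:
  project_name: Darwin Correspondence Scans
  project_path: /data/projects/darwin
  mets_path: /data/projects/darwin
  extension: .jp2
  strict_mode: true
  local_renaming_prefix: ren_
  imaging_services_prefix: FIMG-JP2-
  unlisted_files_threshold: 10
'''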
def get_batch_data(batch):
batchpath = join(config['project_path'], batch)
if not exists(batchpath):
rpt('Specified batch does not exist: ' + batchpath, True)
rpt('Processing batch "{}"\n'.format(batch))
# won't throw exception if there are empty directories
subdir_dict = { d : { f for f in listdir(join(batchpath, d)) if (d != 'mets') }
for d in listdir(batchpath) if isdir(join(batchpath, d)) and (d != 'mets') }
#rpt('subdirectories with files: ' + str(subdir_dict) + '\n')
#subdirs = sorted([ d for d in listdir(batchpath) if isdir(join(batchpath, d)) and (d != 'mets') ])
subdirs = sorted(subdir_dict.keys())
desc = 'Batch directory summary:\n'
for sd in subdirs:
desc += ' {} with {:d} files\n'.format(sd, len(subdir_dict[sd]))
desc += '\n'
rpt(desc)
#check for renaming prefix to prevent duplicate processing
for sd in subdirs:
for f in subdir_dict[sd]:
if f.startswith(config['local_renaming_prefix']):
rpt('subdirectory {} contains file already renamed'.format(sd), True)
return subdirs, subdir_dict
def get_mets_file_data(re_pattern, mf):
'''
for each metsfile
read all the structMap.div.div.fptr
ToDo: verify just one fptr per div
Note: the FILEID has a production prefix + batch subdir prefix + seq number
where the subdir prefix is unique to the metsfile in the given batch
verify the FILEID matches the regular expression used for data extraction
verify they have the same prefix
ToDo: verify continuity of order numbers
<structMap>
<div DMDID="C0" TYPE="CITATION">
<div ORDER="1" LABEL="Hooker, Joseph D. June 12, 1873 [1]" TYPE="PAGE">
<fptr FILEID="FIMG-JP2-GenA_0001"/>
</div>
'''
# does this need exception handling?
tree = etree.parse(mf)
root = tree.getroot()
fptr_data = [] # list of fptrs, each a dictionary with fields order, prefix, seqno
prefixes = set() # distinct prefixes
for div in root.findall("./METS:structMap/METS:div/METS:div", NS):
fptrs = div.findall("METS:fptr", NS)
        if len(fptrs) != 1:
rpt('METS div/div with no fptr or multiple fptr tags where ORDER = {}'.format(div.get("ORDER")), True)
#print('fptrs: ' + str(len(fptrs)))
fptr = div.find("METS:fptr", NS)
m = re_pattern.match(fptr.get("FILEID"))
if m:
#print('group 1: ' + m.group(1) + '; group 2: ' + m.group(2) + '; group 3: ' + m.group(3))
prefixes.add(m.group(2))
fptr_data.append({ 'order' : div.get("ORDER"), 'filename' : m.group(1) + config['extension'], 'seqno' : m.group(3) })
else:
rpt('METS FILEID does not match regular expression', True)
#rpt('fptrdata: ' + str(fptr_data))
prefix = None
if len(prefixes) == 1:
prefix = prefixes.pop()
else:
rpt('multiple FILEID prefixes for mets file ' + mf + ' : ' + str(prefixes), True)
return prefix, fptr_data
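# Illustrative sketch: how the FILEID pattern built in get_mets_data() below
# decomposes an id like the one shown in the docstring above, assuming an
# imaging_services_prefix of 'FIMG-JP2-' (hypothetical). Never called by main().
def _example_fileid_parse():
    pattern = re.compile(r'FIMG-JP2-((\w+)_(\d+))')
    m = pattern.match('FIMG-JP2-GenA_0001')
    if m:
        # filename stem, subdirectory prefix, zero-padded sequence number
        print(m.group(1), m.group(2), m.group(3))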
def get_mets_data(batch):
    re_pattern = re.compile(r'' + config['imaging_services_prefix'] + r'((\w+)_(\d+))')
metsbatchpath = join(config['mets_path'], batch, 'mets')
if not exists(metsbatchpath):
rpt('METS directory for batch {} not found'.format(batch), True)
metsfiles = sorted([ f for f in listdir(metsbatchpath) if splitext(f)[1] == '.xml' ]) # join path?
#rpt(str(len(metsfiles)) + ' metsfiles: ' + str(metsfiles))
if len(metsfiles) == 0:
rpt("No mets files found for this batch", True)
desc = 'METS files summary:\n'
metsdata_dict = {}
for mf in metsfiles:
prefix, metsdata = get_mets_file_data(re_pattern, join(metsbatchpath, mf))
metsdata_dict[prefix] = metsdata
desc += ' {} with prefix "{}" listing {:d} file items\n'.format(mf, prefix, len(metsdata))
desc += '\n'
rpt(desc)
return metsdata_dict
def compare_drive_to_mets(subdirs, subdir_dict, metsdata_dict):
rpt('Validation:')
# correlate batch subdirs and mets files
# sort and compare subdirs and metsdata dictionaries
if len(subdirs) != len(metsdata_dict):
rpt('subdirs count {} != mets count {}'.format(len(subdirs), len(metsdata_dict)), True)
set_subdirs = set(subdirs)
set_metsdata = set(metsdata_dict.keys())
if set_subdirs != set_metsdata:
rpt('subdirs names do not match mets file prefixes: ', False) # exit after listing
#list the differences
diff_subdirs = set_subdirs - set_metsdata
if len(diff_subdirs) > 0:
rpt(' subdirs without corresponding mets files: {}'.format(diff_subdirs), True)
diff_metsdata = set_metsdata - set_subdirs
if len(diff_metsdata) > 0:
rpt(' mets files without corresponding subdirs: {}'.format(diff_metsdata), True)
else:
rpt(' subdirectories match mets file prefixes')
#compare file counts and fptr counts - check all before exiting on error
files_unlisted = False #i.e. not in METS but on drive, not critical in non-strict mode
files_unlisted_threshold_reached = False
files_missing = False
for sd in subdirs:
fcount = len(subdir_dict[sd])
fptrcount = len(metsdata_dict[sd])
if len(subdir_dict[sd]) != len(metsdata_dict[sd]):
rpt(' In subdirectory {} mismatch of {:d} files with {:d} METS fptrs'.format(sd, fcount, fptrcount))
else:
rpt(' subdirectory {} has same number of files as listed by associated mets file'.format(sd))
#check for additional unlisted files not in METS
fname_set = { fptr['filename'] for fptr in metsdata_dict[sd] }
unlisted_count = 0
for f in sorted(subdir_dict[sd]):
if f not in fname_set:
if unlisted_count == 0:
files_unlisted = True
                    rpt('      Files in subdirectory {} with no corresponding fptr fileid in METS:'.format(sd))
unlisted_count += 1
rpt(' {}'.format(f))
if unlisted_count >= config['unlisted_files_threshold']:
files_unlisted_threshold_reached = True
rpt(' Threshold of {} unlisted files reached for subdirectory {}'.format(config['unlisted_files_threshold'], sd))
#check if any file not found on drive
missing_count = 0
for fptr in metsdata_dict[sd]:
if fptr['filename'] not in subdir_dict[sd]:
if missing_count == 0:
files_missing = True
rpt(' Filenames listed in METS not found in drive subdirectory {}'.format(sd))
missing_count += 1
rpt(' {}'.format(fptr['filename']))
if files_missing or files_unlisted_threshold_reached:
rpt(' end listing of mismatches', True)
if files_unlisted:
rpt(' end listing of mismatches', config['strict_mode'])
else:
rpt(' confirmed one-to-one correspondence between all METS fptr items and files on drive')
def write_renaming_script(metsdata_dict, batch):
'''
currently only Python
currently don't separate scripts for each subdir
    not checking whether the script already exists; overwriting is assumed to be intentional
'''
fname = join(config['project_path'], batch , batch + '-rename-script.py')
lc = 0
try:
with open(fname, 'w') as script:
script.write('import os\n\n')
script.write('print(\'Renaming of files in batch {}\')\n'.format(batch))
for subdir in sorted(metsdata_dict.keys()): #ToDo: use subdirs list instead
chunk = ''
ren_prefix = config['local_renaming_prefix'] + subdir + '_'
for fptr in metsdata_dict[subdir]:
#put the same zero padding in the new file name as found in the FILEID
template = '{}{:0' + str(len(fptr['seqno'])) + 'd}' + config['extension']
chunk += ('os.rename( \'{0}\', \'{1}\')\n'.format(join(subdir, fptr['filename']),
join(subdir, template.format(ren_prefix, int(fptr['order'])))))
lc += 1
script.write(chunk + '\n\n')
script.write('print(\'Renaming complete.\')')
except (OSError, IOError) as e:
rpt('Error writing renaming script: {}'.format(e), True)
rpt('Wrote script {} with {:d} renaming lines'.format(fname, lc ))
def write_undo_script(metsdata_dict, batch):
    '''
    Currently writes a Python script only.
    Currently does not write a separate script for each subdir.
    Not checking whether the script already exists; overwriting is assumed to be intentional.
    '''
fname = join(config['project_path'], batch , batch + '-undo-script.py')
lcount = 0
try:
with open(fname, 'w') as script:
script.write('import os\n\n')
script.write('print(\'Undo renaming of file in batch {}\')\n'.format(batch))
for subdir in sorted(metsdata_dict.keys()): #ToDo: use subdirs list instead
chunk = ''
ren_prefix = config['local_renaming_prefix'] + subdir + '_'
for fptr in metsdata_dict[subdir]:
#put the same zero padding in the new file name as found in the FILEID
template = '{}{:0' + str(len(fptr['seqno'])) + 'd}' + config['extension']
chunk += ('os.rename( \'{1}\', \'{0}\')\n'.format(join(subdir, fptr['filename']),
join(subdir, template.format(ren_prefix, int(fptr['order'])))))
lcount += 1
script.write(chunk + '\n\n')
script.write('print(\'Undo complete.\')')
except (OSError, IOError) as e:
rpt('Error writing undo script: {}'.format(e), True)
rpt('Wrote undo script {} with {:d} renaming lines'.format(fname, lcount))
def rename_files(metsdata_dict, batchpath):
count = 0
try:
for subdir in sorted(metsdata_dict.keys()):
ren_prefix = config['local_renaming_prefix'] + subdir + '_'
for fptr in metsdata_dict[subdir]:
src = join(subdir, fptr['filename'])
#put the same zero padding in the new file name as found in the FILEID
template = '{}{:0' + str(len(fptr['seqno'])) + 'd}' + config['extension']
dest = join(subdir, template.format(ren_prefix, int(fptr['order'])))
rename(join(batchpath, src), join(batchpath, dest))
rpt('Renamed {} to {}'.format(src, dest), False, True) #don't write to screen
count += 1
except IOError as err:
rpt('Error renaming files: {}'.format(err), True)
rpt('Renamed {} files'.format(count))
def main():
rpt('Image File Resequencer: Reseqr')
rpt(' Processing at {}'.format(datetime.datetime.now()))
#default command line option values
write_script = False
execute_rename = False
config_file = None
project = None
batch = None
try:
opts, args = getopt.getopt(sys.argv[1:],"hvsxc:p:b:")
except getopt.GetoptError:
rpt('getopterr: ' + HELP, True)
for opt, arg in opts:
if opt == '-h':
print(HELP)
sys.exit()
elif opt == '-s':
write_script = True
elif opt == '-x':
execute_rename = True
#elif opt == '-f':
# force = True
        elif opt == '-c':
config_file = arg #includes path
elif opt == '-p':
project = arg
elif opt == "-b":
batch = arg
#required option
    if batch is None:
rpt('batch param required. ' + HELP, True)
# assign to global
read_project_config(config_file, project)
#strict mode
if config['strict_mode']:
rpt('Running in strict mode')
else:
rpt('Running in non-strict mode')
#init reporting
config['report_path'] = join(config['project_path'], batch, batch + '-report.txt')
# batch subdirs and files
subdirs, subdir_dict = get_batch_data(batch)
# METS
metsdata_dict = get_mets_data(batch)
# valid correlation
compare_drive_to_mets(subdirs, subdir_dict, metsdata_dict)
rpt('\n')
if write_script:
write_renaming_script(metsdata_dict, batch)
elif execute_rename:
rename_files(metsdata_dict, join(config['project_path'], batch))
if write_script or execute_rename:
write_undo_script(metsdata_dict, batch)
rpt('\nProcessing complete\n')
if __name__ == "__main__":
main()
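# Usage sketch (hypothetical project/batch names; the authoritative option list
# is the HELP text defined elsewhere in this script):
#
#   python reseqr.py -p myproject -b batch01 -s   # write renaming + undo scripts
#   python reseqr.py -p myproject -b batch01 -x   # rename files and write undo script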
|
|
"""Python library to enable Axis devices to integrate with Home Assistant."""
# PYTHON RTSP INSPIRATION
# https://github.com/timohoeting/python-mjpeg-over-rtsp-client/blob/master/rtsp_client.py
# https://github.com/perexg/satip-axe/blob/master/tools/multicast-rtp
import asyncio
from collections import deque
import logging
import socket
from typing import Any, Callable, Deque, Dict, List, Optional
_LOGGER = logging.getLogger(__name__)
RTSP_PORT = 554
STATE_PAUSED = "paused"
STATE_PLAYING = "playing"
STATE_STARTING = "starting"
STATE_STOPPED = "stopped"
SIGNAL_DATA = "data"
SIGNAL_FAILED = "failed"
SIGNAL_PLAYING = "playing"
TIME_OUT_LIMIT = 5
class RTSPClient(asyncio.Protocol):
"""RTSP transport, session handling, message generation."""
def __init__(
self, url: str, host: str, username: str, password: str, callback: Callable
) -> None:
"""RTSP."""
self.loop = asyncio.get_running_loop()
self.callback = callback
self.rtp = RTPClient(self.loop, callback)
self.session = RTSPSession(url, host, username, password)
self.session.rtp_port = self.rtp.port
self.session.rtcp_port = self.rtp.rtcp_port
self.method = RTSPMethods(self.session)
self.transport: Optional[asyncio.BaseTransport] = None
self.keep_alive_handle: Optional[asyncio.TimerHandle] = None
self.time_out_handle: Optional[asyncio.TimerHandle] = None
async def start(self) -> None:
"""Start RTSP session."""
await self.rtp.start()
try:
await self.loop.create_connection(
lambda: self, self.session.host, self.session.port
)
except OSError as err:
_LOGGER.debug("RTSP got exception %s", err)
self.stop()
self.callback(SIGNAL_FAILED)
def stop(self) -> None:
"""Stop session."""
self.session.stop()
if self.transport:
self.transport.write(self.method.message.encode()) # type: ignore [attr-defined]
self.transport.close()
self.rtp.stop()
if self.keep_alive_handle is not None:
self.keep_alive_handle.cancel()
if self.time_out_handle is not None:
self.time_out_handle.cancel()
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""Connect to device is successful.
Start configuring RTSP session.
Schedule time out handle in case device doesn't respond.
"""
self.transport = transport
self.transport.write(self.method.message.encode()) # type: ignore [attr-defined]
self.time_out_handle = self.loop.call_later(TIME_OUT_LIMIT, self.time_out)
def data_received(self, data: bytes) -> None:
"""Got response on RTSP session.
Manage time out handle since response came in a reasonable time.
Update session parameters with latest response.
If state is playing schedule keep-alive.
"""
self.time_out_handle.cancel() # type: ignore [union-attr]
self.session.update(data.decode())
if self.session.state == STATE_STARTING:
self.transport.write(self.method.message.encode()) # type: ignore [union-attr]
self.time_out_handle = self.loop.call_later(TIME_OUT_LIMIT, self.time_out)
elif self.session.state == STATE_PLAYING:
self.callback(SIGNAL_PLAYING)
if self.session.session_timeout != 0:
interval = self.session.session_timeout - 5
self.keep_alive_handle = self.loop.call_later(interval, self.keep_alive)
else:
self.stop()
def keep_alive(self) -> None:
"""Keep RTSP session alive per negotiated time interval."""
self.transport.write(self.method.message.encode()) # type: ignore [union-attr]
self.time_out_handle = self.loop.call_later(TIME_OUT_LIMIT, self.time_out)
def time_out(self) -> None:
"""If we don't get a response within time the RTSP request time out.
This usually happens if device isn't available on specified IP.
"""
_LOGGER.warning("Response timed out %s", self.session.host)
self.stop()
self.callback(SIGNAL_FAILED)
def connection_lost(self, exc: Optional[Exception]) -> None:
"""Happens when device closes connection or stop() has been called."""
_LOGGER.debug("RTSP session lost connection")
class RTPClient:
"""Data connection to device.
When data is received send a signal on callback to whoever is interested.
"""
def __init__(self, loop: Any, callback: Optional[Callable] = None) -> None:
"""Configure and bind socket.
We need to bind the port for RTSP before setting up the endpoint
since it will block until a connection has been set up and
the port is needed for setting up the RTSP session.
"""
self.loop = loop
self.client = self.UDPClient(callback)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(("", 0))
self.port = self.sock.getsockname()[1]
self.rtcp_port = self.port + 1
async def start(self) -> None:
"""Start RTP client."""
await self.loop.create_datagram_endpoint(lambda: self.client, sock=self.sock)
def stop(self) -> None:
"""Close transport from receiving any more packages."""
if self.client.transport:
self.client.transport.close()
@property
def data(self) -> str:
"""Refer to most recently received data."""
try:
return self.client.data.popleft()
except IndexError:
return ""
class UDPClient:
"""Datagram recepient for device data."""
def __init__(self, callback: Optional[Callable]) -> None:
"""Signal events to subscriber using callback."""
self.callback = callback
self.data: Deque[str] = deque()
self.transport: Optional[asyncio.BaseTransport] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""Execute when port is up and listening.
Save reference to transport for future control.
"""
_LOGGER.debug("Stream listener online")
self.transport = transport
def connection_lost(self, exc: Optional[Exception]) -> None:
"""Signal retry if RTSP session fails to get a response."""
_LOGGER.debug("Stream recepient offline")
def datagram_received(self, data: str, addr: Any) -> None:
"""Signals when new data is available."""
if self.callback:
self.data.append(data[12:])
self.callback("data")
class RTSPSession:
"""All RTSP session data.
Stores device stream configuration and session data.
"""
def __init__(self, url: str, host: str, username: str, password: str) -> None:
"""Session parameters."""
self._basic_auth: Optional[str] = None
self.sequence = 0
self.url = url
self.host = host
self.port = RTSP_PORT
self.username = username
self.password = password
self.user_agent = "HASS Axis"
self.rtp_port = None
self.rtcp_port = None
self.methods = [
"OPTIONS",
"DESCRIBE",
"SETUP",
"PLAY",
"KEEP-ALIVE",
"TEARDOWN",
]
# Information as part of ack from device
self.rtsp_version: Optional[int] = None
self.status_code: Optional[int] = None
self.status_text: Optional[str] = None
self.sequence_ack: Optional[int] = None
self.date: Optional[str] = None
self.methods_ack: Optional[List[str]] = None
self.basic = False
self.digest = False
self.realm: Optional[str] = None
self.nonce: Optional[str] = None
self.stale: Optional[bool] = None
self.content_type: Optional[str] = None
self.content_base: Optional[str] = None
self.content_length: Optional[int] = None
self.session_id: Optional[str] = None
self.session_timeout = 0
self.transport_ack: Optional[str] = None
self.range: Optional[str] = None
self.rtp_info: Optional[str] = None
self.sdp: Optional[List[str]] = None
self.control_url: Optional[str] = None
@property
def method(self) -> str:
"""Which method the sequence number corresponds to.
0 - OPTIONS
1 - DESCRIBE
2 - SETUP
3 - PLAY
4 - KEEP-ALIVE (OPTIONS)
5 - TEARDOWN
"""
return self.methods[self.sequence]
@property
def state(self) -> str:
"""Which state the session is in.
Starting - all messages needed to get stream started.
Playing - keep-alive messages every self.session_timeout.
"""
if self.method in ["OPTIONS", "DESCRIBE", "SETUP", "PLAY"]:
state = STATE_STARTING
elif self.method in ["KEEP-ALIVE"]:
state = STATE_PLAYING
else:
state = STATE_STOPPED
return state
def update(self, response: str) -> None:
"""Update session information from device response.
Increment sequence number when starting stream, not when playing.
If device requires authentication resend previous message with auth.
"""
data = response.splitlines()
_LOGGER.debug("Received data %s from %s", data, self.host)
while data:
line = data.pop(0)
if "RTSP/1.0" in line:
self.rtsp_version = int(line.split(" ")[0][5])
self.status_code = int(line.split(" ")[1])
self.status_text = line.split(" ")[2]
elif "CSeq" in line:
self.sequence_ack = int(line.split(": ")[1])
elif "Date" in line:
self.date = line.split(": ")[1]
elif "Public" in line:
self.methods_ack = line.split(": ")[1].split(", ")
elif "WWW-Authenticate: Basic" in line:
self.basic = True
self.realm = line.split('"')[1]
elif "WWW-Authenticate: Digest" in line:
self.digest = True
self.realm = line.split('"')[1]
self.nonce = line.split('"')[3]
self.stale = line.split("stale=")[1] == "TRUE"
elif "Content-Type" in line:
self.content_type = line.split(": ")[1]
elif "Content-Base" in line:
self.content_base = line.split(": ")[1]
elif "Content-Length" in line:
self.content_length = int(line.split(": ")[1])
elif "Session" in line:
self.session_id = line.split(": ")[1].split(";")[0]
if "=" in line:
self.session_timeout = int(line.split(": ")[1].split("=")[1])
elif "Transport" in line:
self.transport_ack = line.split(": ")[1]
elif "Range" in line:
self.range = line.split(": ")[1]
elif "RTP-Info" in line:
self.rtp_info = line.split(": ")[1]
elif not line:
if data:
self.sdp = data
break
if self.sdp:
stream_found = False
for param in self.sdp:
if not stream_found and "m=application" in param:
stream_found = True
elif stream_found and "a=control:rtsp" in param:
self.control_url = param.split(":", 1)[1]
break
if self.status_code == 200:
if self.state == STATE_STARTING:
self.sequence += 1
elif self.status_code == 401:
# Device requires authorization, do not increment to next method
pass
else:
# If device configuration is correct we should never get here
_LOGGER.debug(
"%s RTSP %s %s", self.host, self.status_code, self.status_text
)
def generate_digest(self) -> str:
"""RFC 2617."""
from hashlib import md5
ha1 = f"{self.username}:{self.realm}:{self.password}"
HA1 = md5(ha1.encode("UTF-8")).hexdigest()
ha2 = f"{self.method}:{self.url}"
HA2 = md5(ha2.encode("UTF-8")).hexdigest()
encrypt_response = f"{HA1}:{self.nonce}:{HA2}"
response = md5(encrypt_response.encode("UTF-8")).hexdigest()
digest_auth = "Digest "
digest_auth += f'username="{self.username}", '
digest_auth += f'realm="{self.realm}", '
digest_auth += 'algorithm="MD5", '
digest_auth += f'nonce="{self.nonce}", '
digest_auth += f'uri="{self.url}", '
digest_auth += f'response="{response}"'
return digest_auth
def generate_basic(self) -> str:
"""RFC 2617."""
from base64 import b64encode
if not self._basic_auth:
creds = f"{self.username}:{self.password}"
self._basic_auth = "Basic "
self._basic_auth += b64encode(creds.encode("UTF-8")).decode("UTF-8")
return self._basic_auth
def stop(self) -> None:
"""Set session to stopped."""
self.sequence = 5
class RTSPMethods:
"""Generate RTSP messages based on session data."""
def __init__(self, session: RTSPSession) -> None:
"""Define message methods."""
self.session = session
self.message_methods: Dict[str, Callable] = {
"OPTIONS": self.OPTIONS,
"DESCRIBE": self.DESCRIBE,
"SETUP": self.SETUP,
"PLAY": self.PLAY,
"KEEP-ALIVE": self.KEEP_ALIVE,
"TEARDOWN": self.TEARDOWN,
}
@property
def message(self) -> str:
"""Return RTSP method based on sequence number from session."""
message = self.message_methods[self.session.method]()
_LOGGER.debug(message)
return message
def KEEP_ALIVE(self) -> str:
"""Keep-Alive messages doesn't need authentication."""
return self.OPTIONS(False)
def OPTIONS(self, authenticate: bool = True) -> str:
"""Request options device supports."""
message = f"OPTIONS {self.session.url} RTSP/1.0\r\n"
message += self.sequence
message += self.authentication if authenticate else ""
message += self.user_agent
message += self.session_id
message += "\r\n"
return message
def DESCRIBE(self) -> str:
"""Request description of what services RTSP server make available."""
message = f"DESCRIBE {self.session.url} RTSP/1.0\r\n"
message += self.sequence
message += self.authentication
message += self.user_agent
message += "Accept: application/sdp\r\n"
message += "\r\n"
return message
def SETUP(self) -> str:
"""Set up stream transport."""
message = f"SETUP {self.session.control_url} RTSP/1.0\r\n"
message += self.sequence
message += self.authentication
message += self.user_agent
message += self.transport
message += "\r\n"
return message
def PLAY(self) -> str:
"""RTSP session is ready to send data."""
message = f"PLAY {self.session.url} RTSP/1.0\r\n"
message += self.sequence
message += self.authentication
message += self.user_agent
message += self.session_id
message += "\r\n"
return message
def TEARDOWN(self) -> str:
"""Tell device to tear down session."""
message = f"TEARDOWN {self.session.url} RTSP/1.0\r\n"
message += self.sequence
message += self.authentication
message += self.user_agent
message += self.session_id
message += "\r\n"
return message
@property
def sequence(self) -> str:
"""Generate sequence string."""
return f"CSeq: {str(self.session.sequence)}\r\n"
@property
def authentication(self) -> str:
"""Generate authentication string."""
if self.session.digest:
authentication = self.session.generate_digest()
elif self.session.basic:
authentication = self.session.generate_basic()
else:
return ""
return f"Authorization: {authentication}\r\n"
@property
def user_agent(self) -> str:
"""Generate user-agent string."""
return f"User-Agent: {self.session.user_agent}\r\n"
@property
def session_id(self) -> str:
"""Generate session string."""
if self.session.session_id:
return f"Session: {self.session.session_id}\r\n"
return ""
@property
def transport(self) -> str:
"""Generate transport string."""
return f"Transport: RTP/AVP;unicast;client_port={self.session.rtp_port}-{self.session.rtcp_port}\r\n"
|
|
# -*- coding: utf-8 -*-
from datetime import datetime
from dyn.compat import force_unicode
from dyn.tm.errors import DynectInvalidArgumentError
from dyn.tm.session import DynectSession
from dyn.tm.utils import APIList, Active, unix_date
__author__ = 'jnappi'
__all__ = ['get_all_dnssec', 'DNSSECKey', 'DNSSEC']
def get_all_dnssec():
""":return: A ``list`` of :class:`DNSSEC` Services"""
uri = '/DNSSEC/'
api_args = {'detail': 'Y'}
response = DynectSession.get_session().execute(uri, 'GET', api_args)
dnssecs = []
for dnssec in response['data']:
zone = dnssec['zone']
del dnssec['zone']
dnssecs.append(DNSSEC(zone, api=False, **dnssec))
return dnssecs
class DNSSECKey(object):
"""A Key used by the DNSSEC service"""
def __init__(self, key_type, algorithm, bits, start_ts=None, lifetime=None,
overlap=None, expire_ts=None, **kwargs):
"""Create a :class:`DNSSECKey` object
:param key_type: The type of this key. (KSK or ZSK)
:param algorithm: One of (RSA/SHA-1, RSA/SHA-256, RSA/SHA-512, DSA)
:param bits: length of the key. Valid values: 1024, 2048, or 4096
:param start_ts: An epoch time when key is to be valid
:param lifetime: Lifetime of the key expressed in seconds
:param overlap: Time before key expiration when a replacement key is
prepared, expressed in seconds. Default = 7 days.
:param expire_ts: An epoch time when this key is to expire
:param dnskey: The KSK or ZSK record data
:param ds: One of the DS records for the KSK. ZSKs will have this
            value initialized, but with null values.
:param all_ds: All the DS records associated with this KSK. Applies
only to KSK, ZSK will have a zero-length list.
"""
super(DNSSECKey, self).__init__()
self.key_type = key_type
self.algorithm = algorithm
if not isinstance(bits, int):
bits = int(bits)
self.bits = bits
self.start_ts = start_ts
self.lifetime = lifetime
self.overlap = overlap
self.expire_ts = expire_ts
self.dnssec_key_id = self.dnskey = self.ds = self.all_ds = None
for key, val in kwargs.items():
setattr(self, key, val)
@property
def _json(self):
"""The JSON representation of this :class:`DNSSECKey` object"""
json_blob = {'type': self.key_type,
'algorithm': self.algorithm,
'bits': self.bits}
if self.start_ts:
json_blob['start_ts'] = self.start_ts
if self.lifetime:
json_blob['lifetime'] = self.lifetime
if self.overlap:
json_blob['overlap'] = self.overlap
if self.expire_ts:
json_blob['expire_ts'] = self.expire_ts
return json_blob
def _update(self, data):
"""Semi-private _update method"""
for key, val in data.items():
if key == 'type':
setattr(self, 'key_type', val)
elif key == 'bits':
setattr(self, key, int(val))
else:
setattr(self, key, val)
def __str__(self):
"""str override"""
return force_unicode('<DNSSECKey>: {}').format(self.algorithm)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
class DNSSEC(object):
"""A DynECT System DNSSEC Service"""
def __init__(self, zone, *args, **kwargs):
"""Create a :class:`DNSSEC` object
:param zone: the zone this service will be attached to
:param keys: a list of :class:`DNSSECKey`'s for the service
:param contact_nickname: Name of contact to receive notifications
:param notify_events: A ``list`` of events that trigger notifications.
Valid values are "create" (a new version of a key was created),
"expire" (a key was automatically expired), or "warning" (early
warnings (2 weeks, 1 week, 1 day) of events)
"""
super(DNSSEC, self).__init__()
self.valid_notify_events = ('create', 'expire', 'warning')
self._zone = zone
self._contact_nickname = self._notify_events = None
self._keys = APIList(DynectSession.get_session, 'keys')
self._active = None
self.uri = '/DNSSEC/{}/'.format(self._zone)
if 'api' in kwargs:
del kwargs['api']
self._build(kwargs)
elif len(args) == 0 and len(kwargs) == 0:
self._get()
else:
self._post(*args, **kwargs)
self._keys.uri = self.uri
def _post(self, keys, contact_nickname, notify_events=None):
"""Create a new :class:`DNSSEC` Service on the Dynect System"""
self._keys += keys
self._contact_nickname = contact_nickname
self._notify_events = notify_events
api_args = {'keys': [key._json for key in self._keys],
'contact_nickname': self._contact_nickname}
for key, val in self.__dict__.items():
if val is not None and not hasattr(val, '__call__') and \
key.startswith('_'):
if key == '_user_name' or key == '_keys':
pass
else:
api_args[key[1:]] = val
# Need to cast to CSV for API
if self._notify_events is not None:
api_args['notify_events'] = ','.join(self._notify_events)
response = DynectSession.get_session().execute(self.uri, 'POST',
api_args)
self._build(response['data'])
def _get(self):
"""Update this object from an existing :class:`DNSSEC` service from the
Dynect System.
"""
api_args = {}
response = DynectSession.get_session().execute(self.uri, 'GET',
api_args)
self._build(response['data'])
def _build(self, data):
"""Iterate over API data responses and update this object according to
the data returned
"""
for key, val in data.items():
if key == 'keys':
self._keys = APIList(DynectSession.get_session, 'keys')
for key_data in val:
key_data['key_type'] = key_data['type']
del key_data['type']
self._keys.append(DNSSECKey(**key_data))
elif key == 'active':
self._active = Active(val)
else:
setattr(self, '_' + key, val)
self.uri = '/DNSSEC/{}/'.format(self._zone)
self._keys.uri = self.uri
@property
def zone(self):
"""The name of the zone where this service exists. This is a read-only
property
"""
return self._zone
@zone.setter
def zone(self, value):
pass
@property
def active(self):
"""The current status of this :class:`DNSSEC` service. When setting
directly, rather than using activate/deactivate valid arguments are 'Y'
or True to activate, or 'N' or False to deactivate. Note: If your
service is already active and you try to activate it, nothing will
happen. And vice versa for deactivation.
:returns: An :class:`Active` object representing the current state of
this :class:`DNSSEC` Service
"""
self._get() # Do a get to ensure an up-to-date status is returned
return self._active
@active.setter
def active(self, value):
deactivate = ('N', False)
activate = ('Y', True)
if value in deactivate and self.active:
self.deactivate()
elif value in activate and not self.active:
self.activate()
@property
def contact_nickname(self):
"""Name of contact to receive notifications"""
return self._contact_nickname
@contact_nickname.setter
def contact_nickname(self, value):
self._contact_nickname = value
api_args = {'contact_nickname': self._contact_nickname}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
self._build(response['data'])
@property
def notify_events(self):
"""A list of events that trigger notifications. Valid values are:
create (a new version of a key was created), expire (a key was
automatically expired), warning (early warnings (2 weeks, 1 week, 1
day) of events)
"""
return self._notify_events
@notify_events.setter
def notify_events(self, value):
for val in value:
if val not in self.valid_notify_events:
raise DynectInvalidArgumentError('notify_events', val,
self.valid_notify_events)
value = ','.join(value)
api_args = {'notify_events': value}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
self._build(response['data'])
@property
def keys(self):
"""A List of :class:`DNSSECKey`'s associated with this :class:`DNSSEC`
service
"""
# Need this check for get_all_dnssec calls which do not return key info
if self._keys is None or self._keys == []:
self._get()
return self._keys
@keys.setter
def keys(self, value):
if isinstance(value, list) and not isinstance(value, APIList):
self._keys = APIList(DynectSession.get_session, 'keys', None,
value)
elif isinstance(value, APIList):
self._keys = value
self._keys.uri = self.uri
def activate(self):
"""Activate this :class:`DNSSEC` service"""
api_args = {'activate': 'Y'}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
self._build(response['data'])
def deactivate(self):
"""Deactivate this :class:`DNSSEC` service"""
api_args = {'deactivate': 'Y'}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
self._build(response['data'])
def timeline_report(self, start_ts=None, end_ts=None):
"""Generates a report of events this :class:`DNSSEC` service has
performed and has scheduled to perform
:param start_ts: datetime.datetime instance identifying point in time
for the start of the timeline report
:param end_ts: datetime.datetime instance identifying point in time
for the end of the timeline report. Defaults to
datetime.datetime.now()
"""
api_args = {'zone': self._zone}
if start_ts is not None:
api_args['start_ts'] = unix_date(start_ts)
if end_ts is not None:
api_args['end_ts'] = unix_date(end_ts)
elif end_ts is None and start_ts is not None:
api_args['end_ts'] = unix_date(datetime.now())
uri = '/DNSSECTimelineReport/'
response = DynectSession.get_session().execute(uri, 'POST', api_args)
return response['data']
def delete(self):
"""Delete this :class:`DNSSEC` Service from the DynECT System"""
api_args = {}
DynectSession.get_session().execute(self.uri, 'DELETE', api_args)
def __str__(self):
"""str override"""
return force_unicode('<DNSSEC>: {}').format(self._zone)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
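# Usage sketch (hypothetical zone and contact nickname; an authenticated
# DynectSession must already exist before any of these calls reach the API):
def _example_create_dnssec():
    """Attach a DNSSEC service with one KSK and one ZSK to a zone."""
    keys = [DNSSECKey('KSK', 'RSA/SHA-256', 2048),
            DNSSECKey('ZSK', 'RSA/SHA-256', 1024)]
    return DNSSEC('example.com', keys, 'my-contact',
                  notify_events=['create', 'expire'])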
|
|
from datetime import datetime
from zeit.brightcove.convert import Video as BCVideo
from zeit.content.video.video import Video as CMSVideo
import mock
import pytz
import zeit.brightcove.testing
import zeit.cms.tagging.testing
import zeit.cms.testing
import zeit.content.video.playlist
class VideoTest(zeit.cms.testing.FunctionalTestCase,
zeit.cms.tagging.testing.TaggingHelper):
layer = zeit.brightcove.testing.LAYER
def test_converts_cms_fields_to_bc_names(self):
cms = CMSVideo()
cms.title = u'title'
cms.teaserText = u'teaser'
bc = BCVideo.from_cms(cms)
self.assertEqual('title', bc.data['name'])
self.assertEqual('teaser', bc.data['description'])
def test_readonly_fields_are_removed_for_writing(self):
bc = BCVideo()
bc.data['id'] = 'foo'
self.assertNotIn('id', bc.write_data)
def test_looks_up_type_conversion_by_field(self):
cms = CMSVideo()
cms.commentsAllowed = True
bc = BCVideo.from_cms(cms)
self.assertEqual('1', bc.data['custom_fields']['allow_comments'])
def test_looks_up_folder_from_product_config(self):
bc = BCVideo()
bc.data['id'] = 'myvid'
bc.data['created_at'] = '2017-05-15T08:24:55.916Z'
self.assertEqual('http://xml.zeit.de/video/2017-05/myvid', bc.uniqueId)
self.assertEqual(zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/video/2017-05/'), bc.__parent__)
def test_converts_authors(self):
from zeit.content.author.author import Author
self.repository['a1'] = Author()
self.repository['a2'] = Author()
cms = CMSVideo()
cms.authorships = (
cms.authorships.create(self.repository['a1']),
cms.authorships.create(self.repository['a2'])
)
bc = BCVideo.from_cms(cms)
self.assertEqual(
'http://xml.zeit.de/a1 http://xml.zeit.de/a2',
bc.data['custom_fields']['authors'])
def test_converts_keywords(self):
cms = CMSVideo()
self.setup_tags('staatsanwaltschaft', 'parlament')
bc = BCVideo.from_cms(cms)
self.assertEqual(
'staatsanwaltschaft;parlament',
bc.data['custom_fields']['cmskeywords'])
def test_converts_product(self):
cms = CMSVideo()
cms.product = zeit.cms.content.sources.PRODUCT_SOURCE(None).find(
'TEST')
bc = BCVideo.from_cms(cms)
self.assertEqual('TEST', bc.data['custom_fields']['produkt-id'])
def test_product_defaults_to_reuters(self):
bc = BCVideo()
bc.data['reference_id'] = '1234'
cms = CMSVideo()
bc.apply_to_cms(cms)
self.assertEqual('Reuters', cms.product.id)
def test_converts_serie(self):
cms = CMSVideo()
cms.serie = zeit.content.video.interfaces.IVideo['serie'].source(
None).find('Chefsache')
bc = BCVideo.from_cms(cms)
self.assertEqual('Chefsache', bc.data['custom_fields']['serie'])
def test_converts_channels(self):
cms = CMSVideo()
cms.channels = (('Deutschland', 'Meinung'), ('International', None))
bc = BCVideo.from_cms(cms)
self.assertEqual(
'Deutschland Meinung;International',
bc.data['custom_fields']['channels'])
def test_converts_related(self):
cms = CMSVideo()
related = zeit.cms.related.interfaces.IRelatedContent(cms)
related.related = (
zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/online/2007/01/eta-zapatero'),)
bc = BCVideo.from_cms(cms)
self.assertEqual(
'http://xml.zeit.de/online/2007/01/eta-zapatero',
bc.data['custom_fields']['ref_link1'])
def test_converts_advertisement(self):
cms = CMSVideo()
cms.has_advertisement = False
bc = BCVideo.from_cms(cms)
self.assertEqual('FREE', bc.data['economics'])
def test_converts_timestamps(self):
bc = BCVideo()
bc.data['created_at'] = '2017-05-15T08:24:55.916Z'
self.assertEqual(
datetime(2017, 5, 15, 8, 24, 55, 916000, tzinfo=pytz.UTC),
bc.date_created)
def test_only_strings_in_custom_fields(self):
from zeit.content.author.author import Author
self.repository['a1'] = Author()
cms = CMSVideo()
cms.authorships = (cms.authorships.create(self.repository['a1']),)
cms.product = zeit.cms.content.sources.PRODUCT_SOURCE(None).find(
'TEST')
bc = BCVideo.from_cms(cms)
for key, value in bc.data['custom_fields'].items():
self.assertIsInstance(
value, basestring, '%s should be a string' % key)
def test_applies_values_to_cms_object(self):
from zeit.content.author.author import Author
self.repository['a1'] = Author()
cms = CMSVideo()
bc = BCVideo()
bc.data = {
'id': 'myvid',
'name': 'title',
'created_at': '2017-05-15T08:24:55.916Z',
'schedule': {'ends_at': '2018-03-13T23:00:00.000Z',
'starts_at': None},
'state': 'ACTIVE',
'economics': 'AD_SUPPORTED',
'custom_fields': {
'allow_comments': '1',
'authors': 'http://xml.zeit.de/a1',
'channels': 'Deutschland Meinung;International',
'cmskeywords': 'testtag;testtag2',
'produkt-id': 'TEST',
'ref_link1': 'http://xml.zeit.de/online/2007/01/eta-zapatero',
'serie': 'Chefsache',
},
'images': {
'thumbnail': {'src': 'http://example.com/thumbnail'},
'poster': {'src': 'http://example.com/still'},
},
'sources': [{
'src': 'http://example.com/rendition',
}],
}
bc.apply_to_cms(cms)
self.assertEqual('myvid', cms.external_id)
self.assertEqual('title', cms.title)
self.assertEqual(True, cms.commentsAllowed)
self.assertEqual(['http://xml.zeit.de/a1'],
[x.target.uniqueId for x in cms.authorships])
self.assertEqual(['testtag', 'testtag2'],
[x.code for x in cms.keywords])
self.assertEqual((('Deutschland', 'Meinung'), ('International', None)),
cms.channels)
self.assertEqual('TEST', cms.product.id)
self.assertEqual(True, cms.has_advertisement)
self.assertEqual(
(zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/online/2007/01/eta-zapatero'),),
zeit.cms.related.interfaces.IRelatedContent(cms).related)
self.assertEqual('Chefsache', cms.serie.serienname)
self.assertEqual(
datetime(2018, 3, 13, 23, 0, tzinfo=pytz.UTC), cms.expires)
def test_creates_deleted_video_on_notfound(self):
with mock.patch('zeit.brightcove.connection.CMSAPI.get_video') as get:
with mock.patch('zeit.brightcove.resolve.query_video_id') as query:
get.return_value = None
query.return_value = (
'http://xml.zeit.de/online/2007/01/Somalia')
bc = BCVideo.find_by_id('nonexistent')
self.assertIsInstance(bc, zeit.brightcove.convert.DeletedVideo)
self.assertEqual(
'http://xml.zeit.de/online/2007/01/Somalia', bc.uniqueId)
self.assertEqual(
'http://xml.zeit.de/online/2007/01/', bc.__parent__.uniqueId)
def test_missing_values_use_field_default(self):
bc = BCVideo()
cms = CMSVideo()
bc.apply_to_cms(cms)
self.assertTrue(cms.commentsAllowed)
class PlaylistTest(zeit.cms.testing.FunctionalTestCase):
layer = zeit.brightcove.testing.LAYER
def test_converts_video_list(self):
bc = zeit.brightcove.convert.Playlist()
bc.data['video_ids'] = ['search-must-be-mocked']
playlist = zeit.content.video.playlist.Playlist()
with mock.patch('zeit.brightcove.resolve.query_video_id') as query:
query.return_value = 'http://xml.zeit.de/online/2007/01/Somalia'
bc.apply_to_cms(playlist)
self.assertEqual(['http://xml.zeit.de/online/2007/01/Somalia'],
[x.uniqueId for x in playlist.videos])
|
|
from pandac.PandaModules import getConfigShowbase
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import fastRepr, Stack
import sys
import types
import traceback
notify = directNotify.newCategory("ExceptionVarDump")
config = getConfigShowbase()
reentry = 0
def _varDump__init__(self, *args, **kArgs):
global reentry
if reentry > 0:
return
reentry += 1
# frame zero is this frame
f = 1
self._savedExcString = None
self._savedStackFrames = []
while True:
try:
frame = sys._getframe(f)
except ValueError, e:
break
else:
f += 1
self._savedStackFrames.append(frame)
self._moved__init__(*args, **kArgs)
reentry -= 1
sReentry = 0
def _varDump__print(exc):
global sReentry
global notify
if sReentry > 0:
return
sReentry += 1
if not exc._savedExcString:
s = ''
foundRun = False
for frame in reversed(exc._savedStackFrames):
filename = frame.f_code.co_filename
codename = frame.f_code.co_name
if not foundRun and codename != 'run':
# don't print stack frames before run(),
# they contain builtins and are huge
continue
foundRun = True
s += '\nlocals for %s:%s\n' % (filename, codename)
locals = frame.f_locals
for var in locals:
obj = locals[var]
rep = fastRepr(obj)
s += '::%s = %s\n' % (var, rep)
exc._savedExcString = s
exc._savedStackFrames = None
notify.info(exc._savedExcString)
sReentry -= 1
oldExcepthook = None
# store these values here so that Task.py can always reliably access them
# from its main exception handler
wantStackDumpLog = False
wantStackDumpUpload = False
variableDumpReasons = []
dumpOnExceptionInit = False
class _AttrNotFound:
pass
def _excepthookDumpVars(eType, eValue, tb):
origTb = tb
excStrs = traceback.format_exception(eType, eValue, origTb)
s = 'printing traceback in case variable repr crashes the process...\n'
for excStr in excStrs:
s += excStr
notify.info(s)
s = 'DUMPING STACK FRAME VARIABLES'
#import pdb;pdb.set_trace()
#foundRun = False
foundRun = True
while tb is not None:
frame = tb.tb_frame
code = frame.f_code
# this is a list of every string identifier used in this stack frame's code
codeNames = set(code.co_names)
# skip everything before the 'run' method, those frames have lots of
# not-useful information
if not foundRun:
if code.co_name == 'run':
foundRun = True
else:
tb = tb.tb_next
continue
s += '\n File "%s", line %s, in %s' % (
code.co_filename, frame.f_lineno, code.co_name)
stateStack = Stack()
# prime the stack with the variables we should visit from the frame's data structures
# grab all of the local, builtin and global variables that appear in the code's name list
name2obj = {}
for name, obj in frame.f_builtins.items():
if name in codeNames:
name2obj[name] = obj
for name, obj in frame.f_globals.items():
if name in codeNames:
name2obj[name] = obj
for name, obj in frame.f_locals.items():
if name in codeNames:
name2obj[name] = obj
# show them in alphabetical order
names = name2obj.keys()
names.sort()
# push them in reverse order so they'll be popped in the correct order
names.reverse()
traversedIds = set()
for name in names:
stateStack.push([name, name2obj[name], traversedIds])
while len(stateStack) > 0:
name, obj, traversedIds = stateStack.pop()
#notify.info('%s, %s, %s' % (name, fastRepr(obj), traversedIds))
r = fastRepr(obj, maxLen=10)
if type(r) is types.StringType:
r = r.replace('\n', '\\n')
s += '\n %s = %s' % (name, r)
# if we've already traversed through this object, don't traverse through it again
if id(obj) not in traversedIds:
attrName2obj = {}
for attrName in codeNames:
attr = getattr(obj, attrName, _AttrNotFound)
if (attr is not _AttrNotFound):
# prevent infinite recursion on method wrappers (__init__.__init__.__init__...)
try:
className = attr.__class__.__name__
except:
pass
else:
if className == 'method-wrapper':
continue
attrName2obj[attrName] = attr
if len(attrName2obj):
# show them in alphabetical order
attrNames = attrName2obj.keys()
attrNames.sort()
# push them in reverse order so they'll be popped in the correct order
attrNames.reverse()
ids = set(traversedIds)
ids.add(id(obj))
for attrName in attrNames:
obj = attrName2obj[attrName]
stateStack.push(['%s.%s' % (name, attrName), obj, ids])
tb = tb.tb_next
if foundRun:
s += '\n'
if wantStackDumpLog:
notify.info(s)
if wantStackDumpUpload:
excStrs = traceback.format_exception(eType, eValue, origTb)
for excStr in excStrs:
s += excStr
timeMgr = None
try:
timeMgr = base.cr.timeManager
except:
try:
timeMgr = simbase.air.timeManager
except:
pass
if timeMgr:
timeMgr.setStackDump(s)
oldExcepthook(eType, eValue, origTb)
def install(log, upload):
global oldExcepthook
global wantStackDumpLog
global wantStackDumpUpload
global dumpOnExceptionInit
wantStackDumpLog = log
wantStackDumpUpload = upload
dumpOnExceptionInit = config.GetBool('variable-dump-on-exception-init', 0)
if dumpOnExceptionInit:
# this mode doesn't completely work because exception objects
# thrown by the interpreter don't get created until the
# stack has been unwound and an except block has been reached
if not hasattr(Exception, '_moved__init__'):
Exception._moved__init__ = Exception.__init__
Exception.__init__ = _varDump__init__
else:
if sys.excepthook is not _excepthookDumpVars:
oldExcepthook = sys.excepthook
sys.excepthook = _excepthookDumpVars
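# Usage sketch (hedged; the import path assumes the standard Panda3D layout):
#
#   from direct.showbase import ExceptionVarDump
#   ExceptionVarDump.install(log=True, upload=False)
#
# With log=True, frame-local variables of an uncaught exception are written to
# the notifier; upload=True additionally hands the dump to the time manager.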
|
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.gis.db import models
from django.contrib.gis.measure import D
from django.db.utils import IntegrityError
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.utils.html import strip_tags
from django.db.models import Count
from django.contrib.gis.db.models import Extent, Union
from django.contrib.gis.geos import fromstr
from django.db.models import Q
import random
from sorl.thumbnail import get_thumbnail
import re
import logging
logger = logging.getLogger(__name__)
# south introspection rules
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ['^django\.contrib\.gis\.db\.models\.fields\.PointField'])
add_introspection_rules([], ['^django\.contrib\.gis\.db\.models\.fields\.MultiPolygonField'])
except ImportError:
pass
def get_extent_for_openlayers(geoqueryset, srid):
"""
Accepts a GeoQuerySet and SRID.
Returns the extent as a GEOS object in the Google Maps projection system favored by OpenLayers.
The result can be directly passed out for direct use in a JavaScript map.
"""
extent = fromstr('MULTIPOINT (%s %s, %s %s)' % geoqueryset.extent(), srid=srid)
extent.transform(4326)
return extent
class Event(models.Model):
name = models.CharField(max_length=100, blank=True, null=True)
slug = models.SlugField(max_length=100, blank=True, null=True)
description = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.name
def save(self):
"""
        Auto-populate an empty slug field from the Event name and,
        if it conflicts with an existing slug, append a number and try
        saving again.
"""
if not self.slug:
self.slug = slugify(self.name) # Where self.name is the field used for 'pre-populate from'
while True:
try:
super(Event, self).save()
# Assuming the IntegrityError is due to a slug fight
except IntegrityError:
match_obj = re.match(r'^(.*)-(\d+)$', self.slug)
if match_obj:
next_int = int(match_obj.group(2)) + 1
self.slug = match_obj.group(1) + '-' + str(next_int)
else:
self.slug += '-2'
else:
break
class Neighborhood(models.Model):
"""
Neighborhood or town if no neighborhoods are available.
"""
n_id = models.CharField('Neighborhood ID', max_length=20, help_text='ID derived from GIS, not necessarily unique since we are mixing neighborhood types.')
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=100, blank=True, null=True)
geometry = models.MultiPolygonField(srid=26986)
objects = models.GeoManager()
class Meta:
verbose_name = _('Neighborhood')
verbose_name_plural = _('Neighborhoods')
ordering = ['name']
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('neighborhood', [slugify(self.name)])
def save(self, *args, **kwargs):
"""Auto-populate an empty slug field from the MyModel name and
if it conflicts with an existing slug then append a number and try
saving again.
"""
if not self.slug:
self.slug = slugify(self.name) # Where self.name is the field used for 'pre-populate from'
super(Neighborhood, self).save(*args, **kwargs)
class Parktype(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
class Meta:
verbose_name = _('Parktype')
verbose_name_plural = _('Parktypes')
def __unicode__(self):
return self.name
class Parkowner(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
class Meta:
verbose_name = _('Parkowner')
verbose_name_plural = _('Parkowners')
def __unicode__(self):
return self.name
class Friendsgroup(models.Model):
name = models.CharField(max_length=100)
url = models.URLField(blank=True, null=True)
class Parkimage(models.Model):
""" Image taken in a park.
"""
image = models.ImageField(upload_to='parkimages')
caption = models.TextField(default='', blank=True)
hero_image = models.BooleanField(default=False)
default = models.BooleanField(default=False)
hide = models.BooleanField(default=False)
class Meta:
verbose_name = _('Parkimage')
verbose_name_plural = _('Parkimages')
ordering = ['pk']
def __unicode__(self):
caption = getattr(self, 'caption', '')
return '%i: %s' % (self.pk, caption)
def get_thumbnail(self, include_large=False):
TN_DEFAULT_WIDTH = 300
TN_DEFAULT_HEIGHT = 200
TN_DEFAULT_SIZE = '300x200'
LARGE_SIZE = '950x600'
TN_MED_LANDSCAPE = '600x400'
TN_MED_PORTRAIT = '300x400'
PLACEHOLDER = 'http://placehold.it/300x200'
image = {
'src': PLACEHOLDER,
'masonry_src': PLACEHOLDER,
'caption': self.caption,
'default': self.default,
'width': TN_DEFAULT_WIDTH,
'height': TN_DEFAULT_HEIGHT
}
try:
image['large_src'] = get_thumbnail(self.image, LARGE_SIZE, crop='center', quality=100).url
tn = get_thumbnail(self.image, TN_DEFAULT_SIZE, crop='center', quality=80)
image['src'], image['masonry_src'] = tn.url, tn.url
if self.default:
image['width'], image['height'] = tn.width, tn.height
else:
if random.random() < 0.75:
image['ratio'] = self.image.width / self.image.height
if image['ratio'] == 0:
medium_image_portrait = get_thumbnail(self.image, TN_MED_PORTRAIT, crop='center', quality=100)
image['src'], image['masonry_src'] = tn.url, medium_image_portrait.url
image['width'], image['height'] = medium_image_portrait.width, medium_image_portrait.height
else:
medium_image_landscape = get_thumbnail(self.image, TN_MED_LANDSCAPE, crop='center', quality=100)
image['src'], image['masonry_src'] = tn.url, medium_image_landscape.url
image['width'], image['height'] = medium_image_landscape.width, medium_image_landscape.height
except Exception as e:
return None
return image
def thumbnail(self):
if self.image:
thumb = get_thumbnail(self.image.file, settings.ADMIN_THUMBS_SIZE, crop='center', quality=80)
return u'<img width="%s" height="%s" src="%s" alt="%s" />' % (thumb.width, thumb.height, thumb.url, self.caption)
else:
return None
thumbnail.short_description = 'Image'
thumbnail.allow_tags = True
get_thumbnail.allow_tags = True
def get_parks_string(self):
parks = [p.name for p in self.parks.all()]
return ", ".join(parks)
get_parks_string.short_description = 'Parks'
class Park(models.Model):
"""
Park or similar Open Space.
"""
ACCESS_CHOICES = (
('y', 'Yes'),
('n', 'No'),
('u', 'Unknown'),
)
os_id = models.CharField('OS ID', max_length=9, null=True, blank=True, help_text='Refers to MassGIS OS_ID')
name = models.CharField(max_length=100, blank=True, null=True)
slug = models.SlugField(max_length=100, blank=True, null=True, unique=True)
alt_name = models.CharField('Alternative name', max_length=100, blank=True, null=True)
description = models.TextField(blank=True, null=True)
address = models.CharField(max_length=50, blank=True, null=True)
phone = models.CharField(max_length=50, blank=True, null=True)
neighborhoods = models.ManyToManyField(Neighborhood, related_name='neighborhoods', blank=True)
parktype = models.ForeignKey(Parktype, blank=True, null=True)
parkowner = models.ForeignKey(Parkowner, blank=True, null=True)
friendsgroup = models.ForeignKey("Friendsgroup", blank=True, null=True)
events = models.ManyToManyField("Event", related_name="events", blank=True, null=True)
access = models.CharField(max_length=1, blank=True, null=True, choices=ACCESS_CHOICES)
area = models.FloatField(blank=True, null=True)
images = models.ManyToManyField(Parkimage, blank=True, null=True, related_name='parks')
featured = models.BooleanField(default=False)
geometry = models.MultiPolygonField(srid=26986)
objects = models.GeoManager()
class Meta:
verbose_name = _('Park')
verbose_name_plural = _('Parks')
def __unicode__(self):
return self.name
@classmethod
def featured_with_images(cls):
return (
cls.objects
.annotate(num_of_images=Count('images'))
.filter(featured=True, num_of_images__gt=0)
)
@models.permalink
def get_absolute_url(self):
return ('park', ['%s-%d' % (slugify(self.name), self.id)])
def area_acres(self):
return round((self.area / 4047), 1)
def lat_long(self):
self.geometry.transform(4326)
return [self.geometry.centroid.y, self.geometry.centroid.x]
def point_on_surface(self):
self.geometry.transform(4326)
return list(self.geometry.point_on_surface)
def get_image_thumbnails(self, include_large=False):
images = []
for i in self.images.filter(default=False):
try:
images.append(i.get_thumbnail(include_large=include_large))
except IOError, e:
logger.error(e)
except Exception as e:
logger.error(e)
if not images:
for i in self.images.filter(default=True):
try:
images.append(i.get_thumbnail(include_large=include_large))
except IOError, e:
logger.error(e)
except Exception as e:
logger.error(e)
return images
def to_external_document(self, user, include_large=False, include_extra_info=False):
change_url = None
if user.has_perm('parks.change_park'):
change_url = reverse('admin:parks_park_change', args=(self.id,))
def image_format(park):
image = park.get_image_thumbnails(include_large=include_large)[:1]
return image[0] if image else {}
facilities = Activity.objects.filter(activity__park=self.id).distinct()
doc = {
'id': self.id,
'url': self.get_absolute_url(),
'name': self.name,
'area': self.area_acres(),
'description': self.description,
'images': self.get_image_thumbnails(include_large=include_large),
'access': self.get_access_display(),
'address': self.address,
'owner': self.parkowner.name,
'point_on_surface': self.point_on_surface(),
'change_url': change_url
}
if include_extra_info:
filtered_queryset = Park.objects.filter(name=self.name) # doesn't yet transform correctly after aggregated
extent = get_extent_for_openlayers(filtered_queryset, 26986)
doc['nearby_parks'] = [{'id': p.pk, 'url': p.get_absolute_url(), 'name': p.name, 'image': image_format(p)} for p in self.nearest_parks_by_distance(0.25)]
doc['recommended_parks'] = [{'id': p.pk, 'url': p.get_absolute_url(), 'name': p.name, 'image': image_format(p)} for p in self.recommended_parks()]
doc['activities'] = [{'name': p.name, 'slug': p.slug, 'id': p.id } for p in facilities]
doc['bbox'] = list(extent.coords)
return doc
def nearest_parks_by_distance(self, distance_in_miles):
return Park.objects.filter(geometry__distance_lt=(self.geometry, D(mi=distance_in_miles))).filter(~Q(name=self.name)).distinct('name')
def recommended_parks(self):
return self.nearest_parks_by_distance(0.25).filter(parktype=self.parktype).filter(~Q(name=self.name)).distinct('name')
# all_facilities = []
# for id in facilities:
# all_facilities.push(id)
# return Parks.objects.filter(pk__in=self.id).distinct()
def get_facilities(self, park_id):
""" Returns facilities as JSON for park id
"""
park = Park.objects.get(pk=park_id)
facilities = Facility.objects.transform(4326).filter(park=park).select_related('facilitytype').prefetch_related('activity')
features = []
for f in facilities:
activities = [a.name for a in f.activity.all()]
geojson_prop = dict(
name=f.name,
icon=f.facilitytype.icon.url,
activities=activities,
status=f.status,
access=f.access,
notes=f.notes,
            )
            # assemble a GeoJSON feature so the computed properties are returned
            features.append(dict(
                type='Feature',
                geometry=dict(type='Point',
                              coordinates=[f.geometry.x, f.geometry.y]),
                properties=geojson_prop,
            ))
        response = dict(type='FeatureCollection', features=features)
        return response
def save(self, *args, **kwargs):
self.area = self.geometry.area
# FIXME: we need a better slugify routine
self.slug = '%s-%d' % (slugify(self.name), self.id)
super(Park, self).save(*args, **kwargs)
try:
# cache containing neighorhood
# doesn't work with admin forms, m2m get cleared during admin save
# FIXME: improve routine - compare neighborhoods we intersect with against already stored neighborhoods
neighborhoods = Neighborhood.objects.filter(geometry__intersects=self.geometry)
self.neighborhoods.clear()
self.neighborhoods.add(*neighborhoods)
except TypeError:
self.neighborhoods = None
class Activity(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
slug = models.SlugField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = _('Activity')
verbose_name_plural = _('Activities')
ordering = ['name']
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name) # Where self.name is the field used for 'pre-populate from'
super(Activity, self).save(*args, **kwargs)
class Facilitytype(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
icon = models.ImageField(blank=False, upload_to="icons", null=False, help_text="Must be 32x37px to function properly")
class Meta:
verbose_name = _('Facilitytype')
verbose_name_plural = _('Facilitytypes')
def __unicode__(self):
return self.name
class Facility(models.Model):
"""
Facility in or outside a park.
"""
name = models.CharField(max_length=50, blank=True, null=True)
facilitytype = models.ForeignKey(Facilitytype)
activity = models.ManyToManyField(Activity, related_name='activity')
location = models.CharField(max_length=50, blank=True, null=True, help_text='Address, nearby Landmark or similar location information.')
status = models.CharField(max_length=50, blank=True, null=True) # FIXME: choices?
park = models.ForeignKey(Park, blank=True, null=True)
notes = models.TextField(blank=True,)
access = models.TextField(blank=True,)
geometry = models.PointField(srid=26986)
objects = models.GeoManager()
class Meta:
verbose_name = _('Facility')
verbose_name_plural = _('Facilities')
def activity_string(self):
out = []
for activity in self.activity.all():
out.append(activity.name)
return ", ".join(out)
activity_string.short_description = 'Activities'
def parktype_string(self):
return self.park.parktype
def icon_url(self):
if self.facilitytype.icon:
return '%s' % (self.facilitytype.icon.url,)
return '%sparks/img/icons/%s.png' % (settings.STATIC_URL, slugify(self.facilitytype))
def admin_url(self):
return reverse('admin:parks_facility_change', args=(self.id,))
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
try:
# cache containing park
self.park = Park.objects.get(geometry__contains=self.geometry)
except:
self.park = None
super(Facility, self).save(*args, **kwargs)
class Story(models.Model):
RATING_CHOICES = (
('1', "Happy"),
('2', "Blah"),
('3', "Idea"),
('4', "Sad"),
)
date = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100, blank=False, null=False)
rating = models.CharField(max_length=1, default='0', blank=False, null=False, choices=RATING_CHOICES)
text = models.TextField(blank=False, null=False)
email = models.EmailField(max_length=100, blank=False, null=False)
park = models.ForeignKey(Park, blank=True, null=False)
objectionable_content = models.BooleanField(default=False)
class Meta:
ordering = ('-date',)
@models.permalink
def get_absolute_url(self):
return ('parks.views.story', [str(self.id)])
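# Query sketch (hypothetical slug; assumes the GIS data has been loaded):
#
#   park = Park.objects.get(slug='example-park-1')
#   nearby = park.nearest_parks_by_distance(0.25)   # distinct parks within 1/4 mile
#   featured = Park.featured_with_images()          # featured parks with at least one image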
|
|
'''
This package provides a function and a base class:
unify and AbstractSegmentation.
unify(data_sets, directory, size=None, segmentation_size=None, crop=False,
      splits=None, min_frequency=None, min_coverage=None, synonyms=None,
      test_limit=None, single_process=False, verbose=False):
unify creates a standard format multichannel segmentation
out of heterogeneous labeled image sources.
Input:
data_sets is a list of instances of subclasses of
AbstractSegmentation (or similar objects). Five methods
must be provided on each object.
names(cat, j) - a list of names that describe label j
as it is used within category cat in this dataset.
size() - number of images in the dataset.
filename(i) - the jpg filename for the i'th image in the dataset.
metadata(i) - a small pickleable record from which the
        segmentation of the ith item can be derived, without reference
to a specific corpus instance.
@classmethod
resolve_segmentation(m) - resolves to a dict and a shape of a segmentation.
The dict contains string key names and numpy-array vals describing
the segmentation of the image drived from metadata m. The keys
of this dict are arbitrary short strings, but for example could
look like:
{
'scene': 1293, # 0-d: applies one label to the whole image
'object': (1024x1280 int16), # 2-d: one label per pixel
'part': (3x1024x1280 int16), # 3-d: layers of labels, dominant first
'texture': [14,22] # 1-d: multiple whole-image labels, dominant first
}
The reason for the separation between metadata(i) and
    resolve_segmentation(m) is so that we can run metadata(i) quickly on the
main process, and then shard resolve_segmentation(m) out to worker
processes.
Output is the following directory structure:
'''
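# A minimal data_sets entry could look like this sketch (hypothetical dataset
# with a single image carrying only a whole-image 'scene' label; note that the
# unifier below calls all_names() when normalizing label names):
#
#   class TinySegmentation(object):
#       def all_names(self, category, j):
#           return ['sky'] if j == 1 else []
#       def size(self):
#           return 1
#       def filename(self, i):
#           return '/data/tiny/img_0.jpg'
#       def metadata(self, i):
#           return i
#       @classmethod
#       def resolve_segmentation(cls, m):
#           return {'scene': 1}, (1, 1)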
README_TEXT = '''
This directory contains the following data and metadata files:
images/[datasource]/...
images drawn from a specific datasource are reformatted
and scaled and saved as jpeg and png files in subdirectories
of the images directory.
index.csv
contains a list of all the images in the dataset, together with
available labeled data, in the form:
image,split,ih,iw,sh,sw,[color,object,material,part,scene,texture]
for examplle:
dtd/g_0106.jpg,train,346,368,346,368,dtd/g_0106_color.png,,,,,314;194
The first column is always the original image filename relative to
the images/ subdirectory; then image height and width and segmentation
heigh and width dimensions are followed by six columns for label
data in the six categories. Label data can be per-pixel or per-image
(assumed to apply to every pixel in the image), and 0 or more labels
can be specified per category. If there are no labels, the field is ''.
If there are multiple labels, they are separated by semicolons,
and it is assumed that dominant interpretations are listed first.
    Per-image labels are represented by a decimal number, and per-pixel
labels are represented by a filename for an image which encodes
the per-pixel labels in the (red + 256 * green) channels.
category.csv
name,first,last,count,frequency
for example:
object,12,1138,529,208688
In the generic case there may not be six categories; this directory
may contain any set of categories of segmentations. This file
lists all segmentation categories found in the data sources,
    along with statistics on how many class labels apply to
each category, and in what range; as well as how many
images mention a label of that category
label.csv
number,name,category,frequency,coverage,syns
for example:
10,red-c,color(289),289,9.140027,
21,fabric,material(36);object(3),39,4.225474,cloth
This lists all label numbers and corresponding names, along
with some statistics including the number of images for
which the label appears in each category; the total number
of images which have the label; and the pixel portions
of images that have the label.
c_[category].csv (for example, c_color.csv)
code,number,name,frequency,coverage
for example:
4,31,glass,27,0.910724454131
    Although labels are stored under a unified code, this file
lists a standard dense coding that can be used for a
specific subcategory of labels.
'''
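# Reading the generated index.csv (sketch; assumes unify() has already been run
# on some target directory):
#
#   import csv, os
#   with open(os.path.join(target_dir, 'index.csv')) as f:
#       for row in csv.DictReader(f):
#           textures = row['texture'].split(';') if row['texture'] else []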
import codecs
from functools import partial
import itertools
from multiprocessing import Pool, cpu_count
import numpy
import operator
import os
import re
import subprocess
import time
from PIL import Image
from collections import OrderedDict
from scipy.misc import imread, imresize, imsave
from scipy.ndimage.interpolation import zoom
import signal
import shutil
import sys
from unicsv import DictUnicodeWriter
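# Illustrative sketch (not part of the original pipeline): reading back one row
# of the index.csv described in README_TEXT above, e.g. a dict produced by
# csv.DictReader. Per-category fields are '' when no label applies and
# semicolon-separated when several labels apply; 'categories' is assumed to be
# the category list recorded in category.csv.
def read_index_labels(row, categories):
    '''Splits the per-category label fields of an index.csv row into lists.'''
    parsed = {}
    for cat in categories:
        field = row.get(cat, '')
        parsed[cat] = field.split(';') if field else []
    return parsed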
def unify(data_sets, directory, size=None, segmentation_size=None, crop=False,
splits=None, min_frequency=None, min_coverage=None,
synonyms=None, test_limit=None, single_process=False, verbose=False):
# Make sure we have a directory to work in
directory = os.path.expanduser(directory)
ensure_dir(directory)
# Step 0: write a README file with generated information.
write_readme_file([
('data_sets', data_sets), ('size', size),
('segmentation_size', segmentation_size), ('crop', crop),
('splits', splits),
('min_frequency', min_frequency), ('min_coverage', min_coverage),
('synonyms', synonyms), ('test_limit', test_limit),
('single_process', single_process)],
directory=directory, verbose=verbose)
# Clear old images data
try:
if verbose:
print 'Removing old images.'
shutil.rmtree(os.path.join(directory, 'images'), ignore_errors=True)
except:
pass
ensure_dir(os.path.join(directory, 'images'))
# Phase 1: Count label statistics
# frequency = number of images touched by each label
# coverage = total portion of images covered by each label
frequency, coverage = gather_label_statistics(
data_sets, test_limit, single_process,
segmentation_size, crop, verbose)
# Phase 2: Sort, collapse, and filter labels
labnames, syns = normalize_labels(data_sets, frequency, coverage, synonyms)
report_aliases(directory, labnames, data_sets, frequency, verbose)
    # Phase 3: Filter by frequency, and assign numbers
names, assignments = assign_labels(
labnames, frequency, coverage, min_frequency, min_coverage, verbose)
# Phase 4: Collate and output label stats
cats = write_label_files(
directory, names, assignments, frequency, coverage, syns, verbose)
# Phase 5: Create normalized segmentation files
create_segmentations(
directory, data_sets, splits, assignments, size, segmentation_size,
crop, cats, test_limit, single_process, verbose)
def gather_label_statistics(data_sets, test_limit, single_process,
segmentation_size, crop, verbose):
'''
Phase 1 of unification. Counts label statistics.
'''
# Count frequency and coverage for each individual image
stats = map_in_pool(partial(count_label_statistics,
segmentation_size=segmentation_size,
crop=crop,
verbose=verbose),
all_dataset_segmentations(data_sets, test_limit),
single_process=single_process,
verbose=verbose)
# Add them up
frequency, coverage = (sum_histogram(d) for d in zip(*stats))
    # TODO: also, blacklist images that have insufficient labeled pixels
return frequency, coverage
def normalize_labels(data_sets, frequency, coverage, synonyms):
'''
Phase 2 of unification.
Assigns unique label names and resolves duplicates by name.
'''
# Sort by frequency, descending, and assign a name to each label
top_syns = {}
labname = {}
freq_items = zip(*sorted((-f, lab) for lab, f in frequency.items()))[1]
for lab in freq_items:
dataset, category, label = lab
names = [n.lower() for n in data_sets[dataset].all_names(
category, label) if len(n) and n != '-']
if synonyms:
names = synonyms(names)
# Claim the synonyms that have not already been taken
for name in names:
if name not in top_syns:
top_syns[name] = lab
# best_labname may decide to collapse two labels because they
# have the same names and seem to mean the same thing
labname[lab], unique = best_labname(
lab, names, labname, top_syns, coverage, frequency)
return labname, top_syns
def report_aliases(directory, labnames, data_sets, frequency, verbose):
'''
Phase 2.5
Report the aliases. These are printed into 'syns.txt'
'''
show_all = True # Print all details to help debugging
name_to_lab = invert_dict(labnames)
with codecs.open(os.path.join(directory, 'syns.txt'), 'w', 'utf-8') as f:
def report(txt):
f.write('%s\n' % txt)
if verbose:
print txt
for name in sorted(name_to_lab.keys(),
key=lambda n: (-len(name_to_lab[n]), n)):
keys = name_to_lab[name]
if not show_all and len(keys) <= 1:
break
# Don't bother reporting aliases if all use the same index;
# that is probably due to an underlying shared code.
if not show_all and len(set(i for d, c, i in keys)) <= 1:
continue
report('name: %s' % name)
for ds, cat, i in keys:
names = ';'.join(data_sets[ds].all_names(cat, i))
freq = frequency[(ds, cat, i)]
report('%s/%s#%d: %d, (%s)' % (ds, cat, i, freq, names))
report('')
def assign_labels(
labnames, frequency, coverage, min_frequency, min_coverage, verbose):
'''
Phase 3 of unification.
Filter names that are too infrequent, then assign numbers.
'''
# Collect by-name frequency and coverage
name_frequency = join_histogram(frequency, labnames)
name_coverage = join_histogram(coverage, labnames)
names = name_frequency.keys()
if min_frequency is not None:
names = [n for n in names if name_frequency[n] >= min_frequency]
if min_coverage is not None:
names = [n for n in names if name_coverage[n] >= min_coverage]
# Put '-' at zero
names = [n for n in names if n != '-']
names = ['-'] + sorted(names,
key=lambda x: (-name_frequency[x], -name_coverage[x]))
nums = dict((n, i) for i, n in enumerate(names))
assignments = dict((k, nums.get(v, 0)) for k, v in labnames.items())
return names, assignments
def write_label_files(
directory, names, assignments, frequency, coverage, syns, verbose):
'''
Phase 4 of unification.
    Collate some stats and then write them to two metadata files.
'''
# Make lists of synonyms claimed by each label
synmap = invert_dict(
dict((w, assignments[lab]) for w, lab in syns.items()))
# We need an (index, category) count
ic_freq = join_histogram_fn(frequency, lambda x: (assignments[x], x[1]))
ic_cov = join_histogram_fn(coverage, lambda x: (assignments[x], x[1]))
for z in [(j, cat) for j, cat in ic_freq if j == 0]:
del ic_freq[z]
del ic_cov[z]
catstats = [[] for n in names]
# For each index, get a (category, frequency) list in descending order
for (ind, cat), f in sorted(ic_freq.items(), key=lambda x: -x[1]):
catstats[ind].append((cat, f))
index_coverage = join_histogram(coverage, assignments)
with open(os.path.join(directory, 'label.csv'), 'w') as csvfile:
fields = ['number', 'name', 'category',
'frequency', 'coverage', 'syns']
writer = DictUnicodeWriter(csvfile, fieldnames=fields)
writer.writeheader()
for ind, name in enumerate(names):
if ind == 0:
continue
writer.writerow(dict(
number='%d' % ind,
name=name,
category=';'.join('%s(%d)' % s for s in catstats[ind]),
frequency='%d' % sum(f for c, f in catstats[ind]),
coverage='%f' % index_coverage[ind],
syns=';'.join([s for s in synmap[ind] if s != name])
))
# For each category, figure the first, last, and other stats
cat_ind = [(cat, ind) for ind, cat in ic_freq.keys()]
first_index = build_histogram(cat_ind, min)
last_index = build_histogram(cat_ind, max)
count_labels = build_histogram([(cat, 1) for cat, _ in cat_ind])
cat_freq = join_histogram_fn(ic_freq, lambda x: x[1])
cats = sorted(first_index.keys(), key=lambda x: first_index[x])
with open(os.path.join(directory, 'category.csv'), 'w') as csvfile:
fields = ['name', 'first', 'last', 'count', 'frequency']
writer = DictUnicodeWriter(csvfile, fieldnames=fields)
writer.writeheader()
for cat in cats:
writer.writerow(dict(
name=cat,
first=first_index[cat],
last=last_index[cat],
count=count_labels[cat],
frequency=cat_freq[cat]))
# And for each category, create a dense coding file.
for cat in cats:
dense_code = [0] + sorted([i for i, c in ic_freq if c == cat],
key=lambda i: (-ic_freq[(i, cat)], -ic_cov[(i, cat)]))
fields = ['code', 'number', 'name', 'frequency', 'coverage']
with open(os.path.join(directory, 'c_%s.csv' % cat), 'w') as csvfile:
writer = DictUnicodeWriter(csvfile, fieldnames=fields)
writer.writeheader()
for code, i in enumerate(dense_code):
if code == 0:
continue
writer.writerow(dict(
code=code,
number=i,
name=names[i],
frequency=ic_freq[(i, cat)],
coverage=ic_cov[(i, cat)]))
return cats
def create_segmentations(directory, data_sets, splits, assignments, size,
segmentation_size, crop, cats, test_limit, single_process, verbose):
'''
Phase 5 of unification. Create the normalized segmentation files
'''
if size is not None and segmentation_size is None:
segmentation_size = size
# Get assignments into a nice form, once, here.
# (dataset, category): [numpy array with new indexes]
index_max = build_histogram(
[((ds, cat), i) for ds, cat, i in assignments.keys()], max)
index_mapping = dict([k, numpy.zeros(i + 1, dtype=numpy.int16)]
for k, i in index_max.items())
for (ds, cat, oldindex), newindex in assignments.items():
index_mapping[(ds, cat)][oldindex] = newindex
# Count frequency and coverage for each individual image
segmented = map_in_pool(
partial(translate_segmentation,
directory=directory,
mapping=index_mapping,
size=size,
segmentation_size=segmentation_size,
categories=cats,
crop=crop,
verbose=verbose),
all_dataset_segmentations(data_sets, test_limit),
single_process=single_process,
verbose=verbose)
    # Sort nonempty items randomly+reproducibly by md5 hash of the filename.
ordered = sorted([(hashed_float(r['image']), r) for r in segmented if r])
    # Assign splits, pulling out the last portion for validation.
cutoffs = cumulative_splits(splits)
for floathash, record in ordered:
for name, cutoff in cutoffs:
if floathash <= cutoff:
record['split'] = name
break
else:
            assert False, 'hash %f exceeds last split %f' % (
                floathash, cutoffs[-1][1])
# Now write one row per image and one column per category
with open(os.path.join(directory, 'index.csv'), 'w') as csvfile:
fields = ['image', 'split', 'ih', 'iw', 'sh', 'sw'] + cats
writer = DictUnicodeWriter(csvfile, fieldnames=fields)
writer.writeheader()
for f, record in ordered:
writer.writerow(record)
def translate_segmentation(record, directory, mapping, size,
segmentation_size, categories, crop, verbose):
'''
Translates a single segmentation.
'''
dataset, index, seg_class, filename, md = record
basename = os.path.basename(filename)
if verbose:
print 'Processing #%d %s %s' % (index, dataset, basename)
full_seg, shape = seg_class.resolve_segmentation(md)
# Rows can be omitted by returning no segmentations.
if not full_seg:
return None
jpg = imread(filename)
if size is not None:
jpg = scale_image(jpg, size, crop)
for cat in full_seg:
full_seg[cat] = scale_segmentation(
full_seg[cat], segmentation_size, crop)
else:
size = jpg.shape[:2]
segmentation_size = (1, 1)
for cat in full_seg:
if len(numpy.shape(full_seg[cat])) >= 2:
segmentation_size = numpy.shape(full_seg[cat])
break
imagedir = os.path.join(directory, 'images')
ensure_dir(os.path.join(imagedir, dataset))
fn = save_image(jpg, imagedir, dataset, basename)
result = {
'image': os.path.join(dataset, fn),
'ih': size[0],
'iw': size[1],
'sh': segmentation_size[0],
'sw': segmentation_size[1]
}
for cat in full_seg:
if cat not in categories:
continue # skip categories with no data globally
result[cat] = ';'.join(save_segmentation(full_seg[cat],
imagedir, dataset, fn, cat, mapping[(dataset, cat)]))
return result
def best_labname(lab, names, assignments, top_syns, coverage, frequency):
'''
Among the given names, chooses the best name to assign, given
information about previous assignments, synonyms, and stats
'''
# Best shot: get my own name, different from other names.
if 'dog' in names:
print names
for name in names:
if top_syns[name] == lab:
return name, True
    if len(names) == 0 or len(names) == 1 and names[0] in ['', '-']:
# If there are no names, we must use '-' and map to 0.
return '-', False
elif len(names) == 1:
# If we have a conflict without ambiguity, then we just merge names.
other = top_syns[names[0]]
else:
# If we need to merge and we have multiple synonyms, let's score them.
scores = []
# Get the average size of an object of my type
size = coverage[lab] / frequency[lab]
for name in names:
other = top_syns[name]
# Compare it to the average size of objects of other types
other_size = coverage[other] / frequency[other]
scores.append((abs(size - other_size), -frequency[other], other))
        # Choose the synonym that has the most similar average size.
# (breaking ties according to maximum frequency)
other = min(scores)[2]
name = assignments[other]
return name, False
def build_histogram(pairs, reducer=operator.add):
'''Creates a histogram by combining a list of key-value pairs.'''
result = {}
for k, v in pairs:
if k not in result:
result[k] = v
else:
result[k] = reducer(result[k], v)
return result
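# Illustrative example (a sketch): build_histogram([('a', 1), ('a', 2), ('b', 5)])
# returns {'a': 3, 'b': 5}; with reducer=max it would return {'a': 2, 'b': 5}.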
def join_histogram_fn(histogram, makekey):
'''Rekeys histogram according to makekey fn, summing joined buckets.'''
result = {}
for oldkey, val in histogram.iteritems():
newkey = makekey(oldkey)
if newkey not in result:
result[newkey] = val
else:
result[newkey] += val
return result
def join_histogram(histogram, newkeys):
'''Rekeys histogram according to newkeys map, summing joined buckets.'''
result = {}
for oldkey, newkey in newkeys.iteritems():
if newkey not in result:
result[newkey] = histogram[oldkey]
else:
result[newkey] += histogram[oldkey]
return result
def sum_histogram(histogram_list):
'''Adds histogram dictionaries elementwise.'''
result = {}
for d in histogram_list:
for k, v in d.iteritems():
if k not in result:
result[k] = v
else:
result[k] += v
return result
def invert_dict(d):
'''Transforms {k: v} to {v: [k,k..]}'''
result = {}
for k, v in d.iteritems():
if v not in result:
result[v] = [k]
else:
result[v].append(k)
return result
def count_label_statistics(record, segmentation_size, crop, verbose):
'''
Resolves the segmentation record, and then counts all nonzero
    labeled pixels in the resulting segmentation. Returns two maps:
freq[(dataset, category, label)] = 1, if the label is present
coverage[(dataset, category, label)] = p, for portion of pixels covered
'''
dataset, index, seg_class, fn, md = record
if verbose:
print 'Counting #%d %s %s' % (index, dataset, os.path.basename(fn))
full_seg, shape = seg_class.resolve_segmentation(md)
freq = {}
coverage = {}
for category, seg in full_seg.items():
if seg is None:
continue
dims = len(numpy.shape(seg))
if dims <= 1:
for label in (seg if dims else (seg,)):
key = (dataset, category, int(label))
freq[key] = 1
coverage[key] = 1.0
elif dims >= 2:
# We do _not_ scale the segmentation for counting purposes!
# Different scales should produce the same labels and label order.
# seg = scale_segmentation(seg, segmentation_size, crop=crop)
bc = numpy.bincount(seg.ravel())
pixels = numpy.prod(seg.shape[-2:])
for label in bc.nonzero()[0]:
if label > 0:
key = (dataset, category, int(label))
freq[key] = 1
coverage[key] = float(bc[label]) / pixels
return freq, coverage
def all_dataset_segmentations(data_sets, test_limit=None):
'''
Returns an iterator for metadata over all segmentations
for all images in all the datasets. The iterator iterates over
(dataset_name, global_index, dataset_resolver, metadata(i))
'''
j = 0
for name, ds in data_sets.items():
for i in truncate_range(range(ds.size()), test_limit):
yield (name, j, ds.__class__, ds.filename(i), ds.metadata(i))
j += 1
def truncate_range(data, limit):
'''For testing, if limit is not None, limits data by slicing it.'''
if limit is None:
return data
if isinstance(limit, slice):
return data[limit]
return data[:limit]
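# Illustrative example (a sketch): truncate_range(range(100), 10) keeps the
# first ten items, truncate_range(range(100), slice(5, 15)) keeps items 5..14,
# and a limit of None returns the data unchanged.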
def map_in_pool(fn, data, single_process=False, verbose=False):
'''
Our multiprocessing solution; wrapped to stop on ctrl-C well.
'''
if single_process:
return map(fn, data)
n_procs = min(cpu_count(), 12)
original_sigint_handler = setup_sigint()
pool = Pool(processes=n_procs, initializer=setup_sigint)
restore_sigint(original_sigint_handler)
try:
if verbose:
print 'Mapping with %d processes' % n_procs
res = pool.map_async(fn, data)
return res.get(31536000)
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
pool.terminate()
raise
else:
pool.close()
pool.join()
def setup_sigint():
return signal.signal(signal.SIGINT, signal.SIG_IGN)
def restore_sigint(original):
signal.signal(signal.SIGINT, original)
def scale_image(im, dims, crop=False):
'''
Scales or crops a photographic image using antialiasing.
'''
if len(im.shape) == 2:
# Handle grayscale images by adding an RGB channel
im = numpy.repeat(im[numpy.newaxis], 3, axis=0)
if im.shape[0:2] != dims:
if not crop:
im = imresize(im, dims)
else:
source = im.shape[0:2]
aspect = float(dims[1]) / dims[0]
if aspect * source[0] > source[1]:
width = int(dims[1] / aspect)
margin = (width - dims[0]) // 2
im = imresize(im, (width, dims[1]))[
margin:margin + dims[0], :, :]
else:
height = int(dims[0] * aspect)
margin = (height - dims[1]) // 2
im = imresize(im, (dims[0], height))[
margin:margin + dims[1], :, :]
return im
def scale_segmentation(segmentation, dims, crop=False):
'''
Zooms a 2d or 3d segmentation to the given dims, using nearest neighbor.
'''
shape = numpy.shape(segmentation)
if len(shape) < 2 or shape[-2:] == dims:
return segmentation
peel = (len(shape) == 2)
if peel:
segmentation = segmentation[numpy.newaxis]
levels = segmentation.shape[0]
result = numpy.zeros((levels, ) + dims,
dtype=segmentation.dtype)
ratio = (1,) + tuple(res / float(orig)
for res, orig in zip(result.shape[1:], segmentation.shape[1:]))
if not crop:
safezoom(segmentation, ratio, output=result, order=0)
else:
ratio = max(ratio[1:])
height = int(round(dims[0] / ratio))
        hmargin = (segmentation.shape[1] - height) // 2
        width = int(round(dims[1] / ratio))
        wmargin = (segmentation.shape[2] - width) // 2
safezoom(segmentation[:, hmargin:hmargin + height,
wmargin:wmargin + width],
(1, ratio, ratio), output=result, order=0)
if peel:
result = result[0]
return result
def safezoom(array, ratio, output=None, order=0):
'''Like numpy.zoom, but does not crash when the first dimension
of the array is of size 1, as happens often with segmentations'''
dtype = array.dtype
if array.dtype == numpy.float16:
array = array.astype(numpy.float32)
if array.shape[0] == 1:
if output is not None:
output = output[0, ...]
result = zoom(array[0, ...], ratio[1:],
output=output, order=order)
if output is None:
output = result[numpy.newaxis]
else:
result = zoom(array, ratio, output=output, order=order)
if output is None:
output = result
return output.astype(dtype)
def save_image(im, imagedir, dataset, filename):
'''
Try to pick a unique name and save the image with that name.
This is not race-safe, so the given name should already be unique.
'''
trynum = 1
fn = filename
while os.path.exists(os.path.join(imagedir, dataset, fn)):
trynum += 1
fn = re.sub('(?:\.jpg)?$', '%d.jpg' % trynum, filename)
imsave(os.path.join(imagedir, dataset, fn), im)
return fn
def save_segmentation(seg, imagedir, dataset, filename, category, translation):
'''
Saves the segmentation in a file or files if it is large, recording
the filenames. Or serializes it as decimal numbers to record if it
is small. Then returns the array of strings recorded.
'''
if seg is None:
return None
shape = numpy.shape(seg)
if len(shape) < 2:
# Save numbers as decimal strings; and omit zero labels.
return ['%d' % translation[t]
for t in (seg if len(shape) else [seg]) if t]
result = []
for channel in ([()] if len(shape) == 2 else range(shape[0])):
# Save bitwise channels as filenames of PNGs; and save the files.
im = encodeRG(translation[seg[channel]])
if len(shape) == 2:
fn = re.sub('(?:\.jpg)?$', '_%s.png' % category, filename)
else:
fn = re.sub('(?:\.jpg)?$', '_%s_%d.png' %
(category, channel + 1), filename)
result.append(os.path.join(dataset, fn))
imsave(os.path.join(imagedir, dataset, fn), im)
return result
def encodeRG(channel):
'''Encodes a 16-bit per-pixel code using the red and green channels.'''
result = numpy.zeros(channel.shape + (3,), dtype=numpy.uint8)
result[:, :, 0] = channel % 256
result[:, :, 1] = (channel // 256)
return result
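# Illustrative inverse of encodeRG (a sketch; the pipeline itself never needs
# it): recovers label numbers from a segmentation PNG written above, following
# the (red + 256 * green) encoding described in README_TEXT.
def decodeRG(image):
    '''Decodes the 16-bit per-pixel code from the red and green channels.'''
    return (image[:, :, 0].astype(numpy.int32) +
            image[:, :, 1].astype(numpy.int32) * 256)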
def ensure_dir(targetdir):
if not os.path.isdir(targetdir):
try:
os.makedirs(targetdir)
except:
pass
def hashed_float(s):
# Inspired by http://code.activestate.com/recipes/391413/ by Ori Peleg
'''Hashes a string to a float in the range [0, 1).'''
import md5
import struct
[number] = struct.unpack(">Q", md5.new(s).digest()[:8])
return number / (2.0 ** 64) # python will constant-fold this denominator.
def cumulative_splits(splits):
    '''Converts split fractions such as {'train': 0.8, 'val': 0.1, 'test': 0.1}
    into cumulative cutoffs [('train', 0.8), ('val', 0.9), ('test', 1.0)].'''
if splits is None:
return [('train', 1.0)] # Default to just one split.
result = []
c = 0.0
for name, s in splits.items():
c += s
result.append((name, c))
# Eliminate any fp rounding problem: round last split up to 1.0
if result[-1][1] < 1.0 - len(result) * sys.float_info.epsilon:
raise ValueError('splits must add to 1.0, but %s add to %s' % (
repr(splits), result[-1][1]))
result[-1] = (result[-1][0], 1.0)
return result
def write_readme_file(args, directory, verbose):
'''
    Writes a README.txt that describes the settings used to generate the dataset.
'''
with codecs.open(os.path.join(directory, 'README.txt'), 'w', 'utf-8') as f:
def report(txt):
f.write('%s\n' % txt)
if verbose:
print txt
title = '%s joined segmentation data set' % os.path.basename(directory)
report('%s\n%s' % (title, '=' * len(title)))
for key, val in args:
if key == 'data_sets':
report('Joins the following data sets:')
for name, kind in val.items():
report(' %s: %d images' % (name, kind.size()))
else:
report('%s: %r' % (key, val))
report('\ngenerated at: %s' % time.strftime("%Y-%m-%d %H:%M"))
try:
label = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
report('git label: %s' % label)
except:
pass
f.write('\n')
# Add boilerplate readme text
f.write(README_TEXT)
if __name__ == '__main__':
import argparse
import adeseg
import dtdseg
import osseg
import pascalseg
from synonym import synonyms
parser = argparse.ArgumentParser(
description='Generate broden dataset.')
parser.add_argument(
'--size',
type=int, default=224,
help='pixel size for input images, e.g., 224 or 227')
args = parser.parse_args()
image_size = (args.size, args.size)
seg_size = (args.size // 2, args.size // 2)
print 'CREATING NEW SEGMENTATION OF SIZE %d.\n' % args.size
print 'Loading source segmentations.'
ade = adeseg.AdeSegmentation('dataset/ade20k', 'ADE20K_2016_07_26')
dtd = dtdseg.DtdSegmentation('dataset/dtd/dtd-r1.0.1')
# OpenSurfaces is not balanced in scene and object types.
oss = osseg.OpenSurfaceSegmentation('dataset/opensurfaces/',
supply=set(['material', 'color']))
# Remove distinction between upper-arm, lower-arm, etc.
pascal = pascalseg.PascalSegmentation('dataset/pascal/',
collapse_adjectives=set([
'left', 'right', 'front', 'back', 'upper', 'lower', 'side']))
data = OrderedDict(ade20k=ade, dtd=dtd, opensurfaces=oss, pascal=pascal)
unify(data,
splits=OrderedDict(train=0.7, val=0.3),
size=image_size, segmentation_size=seg_size,
directory=('dataset/broden1_%d' % args.size),
synonyms=synonyms,
min_frequency=10, min_coverage=0.5, verbose=True)
|
|
from os.path import join
from datetime import datetime
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, get_language
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.shortcuts import get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from publisher import MpttPublisher
from publisher.errors import PublisherCantPublish
from cms.utils.urlutils import urljoin
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.utils.page import get_available_slug, check_title_slugs
from cms.exceptions import NoHomeFound
from cms.utils.helpers import reversion_register
from cms.utils.i18n import get_fallback_languages
class Page(MpttPublisher):
"""
A simple hierarchical page model
"""
MODERATOR_CHANGED = 0
MODERATOR_NEED_APPROVEMENT = 1
MODERATOR_NEED_DELETE_APPROVEMENT = 2
MODERATOR_APPROVED = 10
    # special case - page was approved, but some of the page's parents are not approved yet
MODERATOR_APPROVED_WAITING_FOR_PARENTS = 11
moderator_state_choices = (
(MODERATOR_CHANGED, _('changed')),
(MODERATOR_NEED_APPROVEMENT, _('req. app.')),
(MODERATOR_NEED_DELETE_APPROVEMENT, _('delete')),
(MODERATOR_APPROVED, _('approved')),
(MODERATOR_APPROVED_WAITING_FOR_PARENTS, _('app. par.')),
)
created_by = models.CharField(_("created by"), max_length=70, editable=False)
changed_by = models.CharField(_("changed by"), max_length=70, editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(editable=False, default=datetime.now)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_('When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True, help_text=_('When to expire the page. Leave empty to never expire.'), db_index=True)
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False, help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_("An unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("navigation extenders"), max_length=80, db_index=True, blank=True, null=True, choices=settings.CMS_NAVIGATION_EXTENDERS)
published = models.BooleanField(_("is published"), blank=True)
template = models.CharField(_("template"), max_length=100, choices=settings.CMS_TEMPLATES, help_text=_('The template used to render the content.'))
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"))
moderator_state = models.SmallIntegerField(_('moderator state'), choices=moderator_state_choices, default=MODERATOR_NEED_APPROVEMENT, blank=True)
level = models.PositiveIntegerField(db_index=True, editable=False)
lft = models.PositiveIntegerField(db_index=True, editable=False)
rght = models.PositiveIntegerField(db_index=True, editable=False)
tree_id = models.PositiveIntegerField(db_index=True, editable=False)
login_required = models.BooleanField(_("login required"),default=False)
menu_login_required = models.BooleanField(_("menu login required"),default=False, help_text=_("only show this page in the menu if the user is logged in"))
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
class Meta:
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('tree_id', 'lft')
app_label = 'cms'
class PublisherMeta:
exclude_fields_append = ['moderator_state']
def __unicode__(self):
title = self.get_menu_title(fallback=True)
if title is None:
title = u""
pre_title = settings.CMS_TITLE_CHARACTER * self.level
return u'%s%s' % (pre_title, title)
def move_page(self, target, position='first-child'):
"""Called from admin interface when page is moved. Should be used on
all the places which are changing page position. Used like an interface
to mptt, but after move is done page_moved signal is fired.
"""
self.move_to(target, position)
# fire signal
from cms.models.moderatormodels import PageModeratorState
self.force_moderation_action = PageModeratorState.ACTION_MOVE
import cms.signals as cms_signals
cms_signals.page_moved.send(sender=Page, instance=self) #titles get saved before moderation
self.save(change_state=True) # always save the page after move, because of publisher
# check the slugs
check_title_slugs(self)
def copy_page(self, target, site, position='first-child', copy_permissions=True, copy_moderation=True):
"""
copy a page and all its descendants to a new location
        Doesn't check for add page permissions anymore; this is done in PageAdmin.
"""
from cms.utils.moderator import update_moderation_message
descendants = [self] + list(self.get_descendants().order_by('-rght'))
site_reverse_ids = [ x[0] for x in Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id') ]
if target:
target.old_pk = -1
if position == "first_child":
tree = [target]
elif target.parent_id:
tree = [target.parent]
else:
tree = []
else:
tree = []
if tree:
tree[0].old_pk = tree[0].pk
first = True
for page in descendants:
titles = list(page.title_set.all())
plugins = list(page.cmsplugin_set.all().order_by('tree_id', '-rght'))
origin_id = page.id
page.old_pk = page.pk
page.pk = None
page.level = None
page.rght = None
page.lft = None
page.tree_id = None
page.published = False
page.publisher_status = Page.MODERATOR_CHANGED
page.publisher_public_id = None
if page.reverse_id in site_reverse_ids:
page.reverse_id = None
if first:
first = False
if tree:
page.parent = tree[0]
else:
page.parent = None
page.insert_at(target, position)
else:
count = 1
found = False
for prnt in tree:
if prnt.old_pk == page.parent_id:
page.parent = prnt
tree = tree[0:count]
found = True
break
count += 1
if not found:
page.parent = None
tree.append(page)
page.site = site
page.save()
# copy moderation, permissions if necessary
if settings.CMS_PERMISSION and copy_permissions:
from cms.models.permissionmodels import PagePermission
for permission in PagePermission.objects.filter(page__id=origin_id):
permission.pk = None
permission.page = page
permission.save()
if settings.CMS_MODERATOR and copy_moderation:
from cms.models.moderatormodels import PageModerator
for moderator in PageModerator.objects.filter(page__id=origin_id):
moderator.pk = None
moderator.page = page
moderator.save()
update_moderation_message(page, unicode(_('Page was copied.')))
for title in titles:
title.pk = None
title.publisher_public_id = None
title.published = False
title.page = page
title.slug = get_available_slug(title)
title.save()
ptree = []
for p in plugins:
try:
plugin, cls = p.get_plugin_instance()
except KeyError: #plugin type not found anymore
continue
p.page = page
p.pk = None
p.id = None
p.tree_id = None
p.lft = None
p.rght = None
p.inherited_public_id = None
p.publisher_public_id = None
if p.parent:
pdif = p.level - ptree[-1].level
if pdif < 0:
ptree = ptree[:pdif-1]
p.parent = ptree[-1]
if pdif != 0:
ptree.append(p)
else:
ptree = [p]
p.level = None
p.save()
if plugin:
plugin.pk = p.pk
plugin.id = p.pk
plugin.page = page
plugin.tree_id = p.tree_id
plugin.lft = p.lft
plugin.rght = p.rght
plugin.level = p.level
plugin.cmsplugin_ptr = p
plugin.publisher_public_id = None
plugin.public_id = None
                    plugin.published = False
plugin.save()
def save(self, no_signals=False, change_state=True, commit=True, force_with_moderation=False, force_state=None, **kwargs):
"""
Args:
commit: True if model should be really saved
force_with_moderation: can be true when new object gets added under
some existing page and this new page will require moderation;
this is because of how this adding works - first save, then move
"""
# Published pages should always have a publication date
publish_directly, under_moderation = False, False
if self.publisher_is_draft:
# publisher specific stuff, but only on draft model, this is here
# because page initializes publish process
if settings.CMS_MODERATOR:
under_moderation = force_with_moderation or self.pk and bool(self.get_moderator_queryset().count())
created = not bool(self.pk)
if settings.CMS_MODERATOR:
if change_state:
if created:
# new page....
self.moderator_state = Page.MODERATOR_CHANGED
elif not self.requires_approvement():
# always change state to need approvement when there is some change
self.moderator_state = Page.MODERATOR_NEED_APPROVEMENT
if not under_moderation and (self.published or self.publisher_public):
# existing page without moderator - publish it directly if
# published is True
publish_directly = True
elif change_state:
self.moderator_state = Page.MODERATOR_CHANGED
#publish_directly = True - no publisher, no publishing!! - we just
# use draft models in this case
if force_state is not None:
self.moderator_state = force_state
if self.publication_date is None and self.published:
self.publication_date = datetime.now()
        # Drafts should not have a publication date, unless it has been set to the future
if self.published:
if settings.CMS_SHOW_START_DATE:
if self.publication_date and self.publication_date <= datetime.now():
self.publication_date = None
else:
self.publication_date = None
if self.reverse_id == "":
self.reverse_id = None
from cms.utils.permissions import _thread_locals
user = getattr(_thread_locals, "user", None)
if user:
self.changed_by = user.username
else:
self.changed_by = "script"
if not self.pk:
self.created_by = self.changed_by
if commit:
if no_signals:# ugly hack because of mptt
super(Page, self).save_base(cls=self.__class__, **kwargs)
else:
super(Page, self).save(**kwargs)
#if commit and (publish_directly or created and not under_moderation):
if self.publisher_is_draft and commit and publish_directly:
self.publish()
# post_publish signal moved to end of publish method()
def get_calculated_status(self):
"""
        get the calculated status of the page based on publication_date,
        publication_end_date, and the published flag
"""
if settings.CMS_SHOW_START_DATE:
if self.publication_date > datetime.now():
return False
if settings.CMS_SHOW_END_DATE and self.publication_end_date:
if self.publication_end_date < datetime.now():
                return False
return self.published
calculated_status = property(get_calculated_status)
def get_languages(self):
"""
get the list of all existing languages for this page
"""
from cms.models.titlemodels import Title
if not hasattr(self, "all_languages"):
self.all_languages = Title.objects.filter(page=self).values_list("language", flat=True).distinct()
self.all_languages = list(self.all_languages)
self.all_languages.sort()
return self.all_languages
def get_absolute_url(self, language=None, fallback=True):
try:
if self.is_home():
return reverse('pages-root')
except NoHomeFound:
pass
if settings.CMS_FLAT_URLS:
path = self.get_slug(language, fallback)
else:
path = self.get_path(language, fallback)
home_pk = None
try:
home_pk = self.home_pk_cache
except NoHomeFound:
pass
ancestors = self.get_cached_ancestors()
if self.parent_id and ancestors[-1].pk == home_pk and not self.get_title_obj_attribute("has_url_overwrite", language, fallback) and path:
path = "/".join(path.split("/")[1:])
return urljoin(reverse('pages-root'), path)
def get_cached_ancestors(self, ascending=True):
if ascending:
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors(ascending))
return self.ancestors_ascending
else:
if not hasattr(self, "ancestors_descending"):
self.ancestors_descending = list(self.get_ancestors(ascending))
return self.ancestors_descending
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle()
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
return getattr(self.get_title_obj(language, fallback, version_id, force_reload), attrname)
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_menu_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_meta_keywords(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the keywords meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_keywords", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.get_title_obj_attribute("application_urls", language, fallback, version_id, force_reload)
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
def _get_title_cache(self, language, fallback, version_id, force_reload):
if not language:
language = get_language()
load = False
if not hasattr(self, "title_cache") or force_reload:
load = True
self.title_cache = {}
elif not language in self.title_cache:
if fallback:
fallback_langs = get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
load = True
if load:
from cms.models.titlemodels import Title
if version_id:
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
for rev in revs:
obj = rev.object
if obj.__class__ == Title:
self.title_cache[obj.language] = obj
else:
title = Title.objects.get_title(self, language, language_fallback=fallback)
if title:
self.title_cache[title.language] = title
language = title.language
return language
def get_template(self):
"""
get the template of this page if defined or if closer parent if
defined or DEFAULT_PAGE_TEMPLATE otherwise
"""
template = None
if self.template and len(self.template)>0 and \
self.template != settings.CMS_TEMPLATE_INHERITANCE_MAGIC:
template = self.template
else:
for p in self.get_ancestors(ascending=True):
template = p.get_template()
break
if not template:
template = settings.CMS_TEMPLATES[0][0]
return template
def get_template_name(self):
"""
get the textual name (2nd parameter in settings.CMS_TEMPLATES)
of the template of this page or of the nearest
ancestor. failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in settings.CMS_TEMPLATES:
if t[0] == template:
return t[1]
return _("default")
def has_change_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission()) and \
self.has_generic_permission(request, "change")
def has_delete_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission()) and \
self.has_generic_permission(request, "delete")
def has_publish_permission(self, request):
return self.has_generic_permission(request, "publish")
def has_advanced_settings_permission(self, request):
return self.has_generic_permission(request, "advanced_settings")
def has_change_permissions_permission(self, request):
"""Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions")
def has_add_permission(self, request):
"""Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add")
def has_move_page_permission(self, request):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page")
def has_moderate_permission(self, request):
"""Has user ability to moderate current page? If moderation isn't
installed, nobody can moderate.
"""
if not settings.CMS_MODERATOR:
return False
return self.has_generic_permission(request, "moderate")
def has_generic_permission(self, request, type):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
att_name = "permission_%s_cache" % type
if not hasattr(self, "permission_user_cache") or not hasattr(self, att_name) \
or request.user.pk != self.permission_user_cache.pk:
from cms.utils.permissions import has_generic_permission
self.permission_user_cache = request.user
setattr(self, att_name, has_generic_permission(self.id, request.user, type, self.site_id))
if getattr(self, att_name):
self.permission_edit_cache = True
return getattr(self, att_name)
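    # Illustrative note (a sketch, not original code): a call such as
    # page.has_generic_permission(request, "publish") caches its result on the
    # instance as permission_publish_cache alongside permission_user_cache, so
    # repeated checks for the same user skip further permission lookups.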
def is_home(self):
if self.parent_id:
return False
else:
try:
return self.home_pk_cache == self.pk
except NoHomeFound:
pass
return False
def get_home_pk_cache(self):
attr = "%s_home_pk_cache" % (self.publisher_is_draft and "draft" or "public")
if not hasattr(self, attr):
setattr(self, attr, self.get_object_queryset().get_home().pk)
return getattr(self, attr)
def set_home_pk_cache(self, value):
attr = "%s_home_pk_cache" % (self.publisher_is_draft and "draft" or "public")
setattr(self, attr, value)
home_pk_cache = property(get_home_pk_cache, set_home_pk_cache)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing page-scope files.
This allows multiple pages to contain files with identical names without namespace issues.
Plugins such as Picture can use this method to initialise the 'upload_to' parameter for
File-based fields. For example:
image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(settings.CMS_PAGE_MEDIA_PATH, "%d" % self.id, filename)
def last_page_states(self):
"""Returns last five page states, if they exist, optimized, calls sql
query only if some states available
"""
# TODO: optimize SQL... 1 query per page
if settings.CMS_MODERATOR:
has_moderator_state = getattr(self, '_has_moderator_state_chache', None)
if has_moderator_state == False:
return None
return self.pagemoderatorstate_set.all().order_by('created',)[:5]
return None
def get_moderator_queryset(self):
"""Returns ordered set of all PageModerator instances, which should
moderate this page
"""
from cms.models.moderatormodels import PageModerator
if not settings.CMS_MODERATOR or not self.tree_id:
return PageModerator.objects.get_empty_query_set()
q = Q(page__tree_id=self.tree_id, page__level__lt=self.level, moderate_descendants=True) | \
Q(page__tree_id=self.tree_id, page__level=self.level - 1, moderate_children=True) | \
Q(page__pk=self.pk, moderate_page=True)
return PageModerator.objects.distinct().filter(q).order_by('page__level')
def is_under_moderation(self):
return bool(self.get_moderator_queryset().count())
def is_approved(self):
"""Returns true, if page is approved and published, or approved, but
parents are missing..
"""
return self.moderator_state in (Page.MODERATOR_APPROVED, Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)
def publish(self):
"""Overrides Publisher method, because there may be some descendants, which
are waiting for parent to publish, so publish them if possible.
IMPORTANT: @See utils.moderator.approve_page for publishing permissions
Returns: True if page was successfully published.
"""
if not settings.CMS_MODERATOR:
return
# publish, but only if all parents are published!!
published = None
try:
published = super(Page, self).publish()
self.moderator_state = Page.MODERATOR_APPROVED
except PublisherCantPublish:
self.moderator_state = Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS
self.save(change_state=False)
if not published:
# was not published, escape
return
# clean moderation log
self.pagemoderatorstate_set.all().delete()
# page was published, check if there are some childs, which are waiting
# for publishing (because of the parent)
publish_set = self.children.filter(moderator_state = Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)
for page in publish_set:
            # recursive call on all children....
page.moderator_state = Page.MODERATOR_APPROVED
page.save(change_state=False)
page.publish()
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self)
return published
def is_public_published(self):
"""Returns true if public model is published.
"""
if hasattr(self, 'public_published_cache'):
# if it was cached in change list, return cached value
return self.public_published_cache
        # otherwise make a db lookup
if self.publisher_public_id:
return self.publisher_public.published
#return is_public_published(self)
return False
def requires_approvement(self):
return self.moderator_state in (Page.MODERATOR_NEED_APPROVEMENT, Page.MODERATOR_NEED_DELETE_APPROVEMENT)
def get_moderation_value(self, user):
"""Returns page moderation value for given user, moderation value is
sum of moderations.
"""
        moderation_value = getattr(self, '_moderation_value_cache', None)
if moderation_value is not None and self._moderation_value_cache_for_user_id == user.pk:
return moderation_value
try:
page_moderator = self.pagemoderator_set.get(user=user)
except ObjectDoesNotExist:
return 0
moderation_value = page_moderator.get_decimal()
        self._moderation_value_cache = moderation_value
        self._moderation_value_cache_for_user_id = user.pk
return moderation_value
reversion_register(Page, follow=["title_set", "cmsplugin_set", "pagepermission_set"])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
"""Navigation Simulator node calculates the position of the vehicle in a simulated environment.
It consumes a force input in body-frame coordinates and updates the position of the vehicle in the simulated environment.
The input forces can be generated using the thrusters_simulator provided in the vehicle_core package. These two aspects
were separated during the implementation of the nav_sim node to reduce coupling and to allow the implementation of a real
thruster simulator using the thruster model developed during experiments conducted in the OSL tank.
This will enable software-in-the-loop (SIL) and hardware-in-the-loop (HIL)
simulations for a generic underwater vehicle given the thruster allocation
matrix (TAM) and the dynamic equations (DE).
"""
from __future__ import division
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from vehicle_core.model import vehicle_model as vm
from vehicle_core.model import dynamic_model as dm
from vehicle_core.util import conversions as cnv
# simulator constants
DEFAULT_SEED = 47
DEFAULT_SEA_DEPTH = 1000.0 # meters
MIN_DEPTH = -0.15 # meters
MAX_PITCH = 1.570 # radians
MAX_SENSED_DEPTH = 100.0 # meters (this sets the maximum value of the altitude in nav_stat)
MAX_CURRENT = 3.0 # m/s
# console log
CONSOLE_STATUS = """nav_sim:
tau: %s
F_net: %s
acc: %s
vel: %s
pos: %s %s
altitude: %s
vel_water: %s
"""
class NavigationSimulator(object):
"""NavigationSimulator is ..."""
def __init__(self, dt, model_config, **kwargs):
# state
self.t = 0.0 # simulation time (sec)
self.dt = dt # simulation step (sec)
# state of the vehicle (body frame referenced if not specified)
# x axis is along the vehicle, y axis is to the right of the vehicle, z axis is downward oriented
#
# vel = [u v w p q r]
# a = d vel/dt
# pos = [x y z phi theta psi]
#
self.acc = np.zeros(6, dtype=np.float64) # output: linear and angular acceleration
self.vel = np.zeros(6, dtype=np.float64) # velocity: linear and angular velocity (body-frame)
self.pos = np.zeros(6, dtype=np.float64) # position: linear and angular position
self.pos_prev = np.zeros(6, dtype=np.float64) # position: linear and angular position
self.tau = np.zeros(6, dtype=np.float64) # input forces [x y z k m n ] (N)
# initial config
self.pos = np.array(kwargs.get('pos', self.pos.tolist())) # initial position
self.depth_bottom = kwargs.get('depth_bottom', DEFAULT_SEA_DEPTH) # sea bottom (meters)
# dynamic model
self.model_config = model_config
self.model = vm.VehicleModel(self.model_config)
# jacobians
self.J = np.zeros((6, 6), dtype=np.float64) # jacobian matrix (translate from body to Earth referenced)
self.J_inv = np.zeros((6, 6), dtype=np.float64) # inverse jacobian matrix
# velocity vectors used in the navigation simulator
# vel_model is used for the combined velocity of vehicle and water current in the vehicle model equations
# vel_water is used for the water current velocity (at vehicle depth)
self.vel_model = np.zeros(6, dtype=np.float64)
self.vel_water = np.zeros(6, dtype=np.float64)
# water currents
self.water_mu = kwargs.get('water_mu', 0.0) # gauss-markov coeff (if zero: pure gaussian)
self.water_sigma = kwargs.get('water_sigma', 0.001) # normal distribution (mu, sigma)
self.water_max = kwargs.get('water_surf', 0.0) # surface speed of water current (maximum)
self.water_min = 0.0 # speed (minimum)
self.water_spd = 0.5 * (self.water_max + self.water_min) # speed (initial)
self.water_b = kwargs.get('water_b', 0.0) # angle of attack azimuth (radians)
self.water_b_sigma = kwargs.get('water_b_sigma', 0.001) # angle of attack azimuth variance (radians)
self.water_a = kwargs.get('water_a', 0.0) # angle of attack elevation (radians)
self.water_a_sigma = kwargs.get('water_a_sigma', 0.001) # angle of attack elevation variance (radians)
# rk related
self.rk4_state = np.concatenate((self.pos, self.vel)) # NOTE: review this with body frame global conversion
# init the rng (aiming for repeatable experiments)
np.random.seed(DEFAULT_SEED)
def reset(self):
"""Resets the state of the navigation simulator by setting to zero the internal state"""
self.pos = np.zeros(6, dtype=np.float64)
self.vel = np.zeros(6, dtype=np.float64)
self.acc = np.zeros(6, dtype=np.float64)
self.tau = np.zeros(6, dtype=np.float64)
self.pos_prev = np.zeros(6, dtype=np.float64)
def update_water_current(self, v, sigma_v, mu, b, sigma_b, a, sigma_a):
"""Updates the water current model used inside the navigation simulator"""
# current speed
self.water_max = np.maximum(0.0, v)
self.water_mu = np.clip(mu, 0.0, 1.0)
self.water_sigma = np.maximum(0.001, sigma_v)
# init speed
self.water_spd = 0.5 * (self.water_max + self.water_min)
# current directions
self.water_b = cnv.wrap_pi(b)
self.water_b_sigma = np.maximum(0.001, sigma_b)
self.water_a = cnv.wrap_pi(a)
self.water_a_sigma = np.maximum(0.001, sigma_a)
def calc_currents(self):
"""Updates the vel_model variable used for the dynamic model equations based on the current state.
First the velocity of the water current is calculated based on vehicle position, direction of the currents and
the sea state. Later the water current velocity is added to the actual vehicle velocity.
See: T. Fossen - Guidance and Control of Ocean Vehicles - Section 3.4 Ocean Currents
"""
Cza = np.eye(3)
Cyb = np.eye(3)
# water flow orientation model with added noise
a = np.random.normal(self.water_a, self.water_a_sigma)
b = np.random.normal(self.water_b, self.water_b_sigma)
Cza[0, 0] = np.cos(a)
Cza[0, 2] = -np.sin(a)
Cza[2, 0] = -np.sin(a)
Cza[2, 2] = np.cos(a)
Cyb[0, 0] = np.cos(-b)
Cyb[0, 1] = np.sin(-b)
Cyb[1, 0] = -np.sin(-b)
Cyb[1, 1] = np.cos(-b)
# water velocity (normal model)
#spd = np.random.normal(self.ws_max, self.water_sigma)
# water velocity (first order gauss-markov process with boundaries)
ws_dot_int = (self.water_mu * self.water_spd + np.random.normal(0.0, self.water_sigma)) * self.dt
ws_new = self.water_spd + ws_dot_int
if ws_new > self.water_max or ws_new < self.water_min:
self.water_spd = self.water_spd - ws_dot_int
else:
self.water_spd = ws_new
# assign water speed (scalar) and enforce boundaries
# this allows to exclude the water current if vc_min = vc_max = 0.0
self.water_spd = np.clip(self.water_spd, self.water_min, self.water_max)
# calculate the water velocity
# this assumes a single layer below the surface (with constant behaviour for the first 10 meters)
# and logarithmic decay with the increase of depth
if self.pos[2] > 10.0:
self.water_spd = self.water_spd * np.log10(1 + ((9.0 * self.pos[2]) / (self.depth_bottom - self.pos[2])))
# calculate the velocity vector
spd_vect = np.array([self.water_spd, 0.0, 0.0], dtype=np.float64)
water_ef = np.dot(Cza, np.dot(Cyb, spd_vect.reshape((-1, 1))))
self.vel_water = np.dot(self.J_inv[0:3, 0:3], water_ef).flatten()
self.vel_model[0:5] = self.vel[0:5] # copy the values in the vel_model (using slicing)
self.vel_model[0:3] += self.vel_water # add water currents
def int_naive(self):
"""naive integration"""
# calculate acceleration from forces using the dynamic model
self.acc = self.model.update_acceleration(self.tau, self.pos, self.vel_model)
# integration of velocity and convert to earth-fixed reference
self.vel = self.vel + (self.acc * self.dt)
self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
self.J_inv = np.linalg.pinv(self.J)
vel_efec = np.dot(self.J, self.vel.reshape((6, 1))).flatten()
# integration of position (double term integrator)
self.pos = self.pos + (vel_efec * self.dt) + 0.5 * (self.acc * self.dt * self.dt)
def int_velocity_verlet(self):
"""velocity verlet integrator
[1]: http://en.wikipedia.org/wiki/Verlet_integration
[2]: http://research.ncl.ac.uk/game/mastersdegree/gametechnologies/physicsnumericalintegration/Physics%20Tutorial%202%20-%20Numerical%20Integration.pdf
"""
# calculate acceleration from forces using the dynamic model
acc_prev = self.model.update_acceleration(self.tau, self.pos, self.vel_model)
# convert velocity to earth-fixed reference
self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
self.J_inv = np.linalg.pinv(self.J)
vel_efec = np.dot(self.J, self.vel.reshape((6, 1))).flatten()
acc_efec = np.dot(self.J, acc_prev.reshape((6, 1))).flatten()
# update position
self.pos = self.pos + (vel_efec * self.dt) + 0.5 * (acc_efec * self.dt * self.dt)
# compute the new velocity from forces using the dynamic model
self.acc = self.model.update_acceleration(self.tau, self.pos, self.vel)
self.vel = self.vel + 0.5 * (acc_prev + self.acc) * self.dt
# Runge-Kutta integration method:
    # - rk4_derivative: this function updates the state using derivatives
    # - rk4: this function implements an RK4 integration method
def rk4_derivative(self, t, state):
pos = state[0:6]
vel = state[6:12]
# invoke main computation
acc = self.model.update_acceleration(self.tau, pos, vel)
# convert velocity to global coordinates as we want position in global coordinates
self.J = dm.update_jacobian(self.J, self.pos[3], self.pos[4], self.pos[5])
self.J_inv = np.linalg.pinv(self.J)
vel_efec = np.dot(self.J, vel.reshape((6, 1))).flatten()
return np.concatenate((vel_efec, acc))
def rk4(self, x, h, y, f):
k1 = f(x, y)
k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
k4 = f(x + h, y + h * k3)
return x + h, y + ((h / 6.0) * (k1 + 2 * (k2 + k3) + k4))
def update(self, tau):
"""Updates the state of the simulated vehicle by applying the forces of vector tau.
        :param tau: forces acting on the vehicle in body-frame coordinates (i.e. thrusters)
"""
# simulation step
self.tau = tau
self.t += self.dt
# take into account the water currents
self.calc_currents()
# simple integration
#self.int_naive()
# improved integration accuracy
self.int_velocity_verlet()
# RK4 integration
# self.t += self.dt
# self.t, self.rk4_state = self.rk4(self.t, self.dt, self.rk4_state, self.rk4_derivative)
# self.vel = self.rk4_state[6:12] # velocity and position are already in body frame
# self.pos = self.rk4_state[0:6] # position is the integration of the velocity in body frame (for RK4)
# wrap angles and limit pitch (-90 / 90)
self.pos[3:6] = cnv.wrap_pi(self.pos[3:6])
self.pos[4] = np.clip(self.pos[4], -MAX_PITCH, MAX_PITCH)
        # prevent the vehicle from flying too high! :)
# remember that a negative depth in NED frame means you are above the surface
if self.pos[2] <= MIN_DEPTH:
self.acc[2] = 0
self.vel[2] = 0
self.pos[2] = MIN_DEPTH
def __str__(self):
return CONSOLE_STATUS % (
self.tau, self.model.F_net,
self.acc, self.vel,
self.pos[0:3], np.rad2deg(self.pos[3:6]),
self.depth_bottom,
self.vel_water
)
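# A minimal, standalone sketch (not part of the simulator) of the classic RK4
# step implemented by rk4() above, applied to the scalar ODE dy/dt = -y.
# Run as a script only; the snippet itself depends only on the standard library.
if __name__ == '__main__':
    import math

    def rk4_step(x, h, y, f):
        # same structure as the rk4() method above: four slope evaluations
        k1 = f(x, y)
        k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
        k4 = f(x + h, y + h * k3)
        return x + h, y + (h / 6.0) * (k1 + 2 * (k2 + k3) + k4)

    t, y = 0.0, 1.0
    for _ in range(100):
        t, y = rk4_step(t, 0.01, y, lambda x, v: -v)
    print(y, math.exp(-1.0))  # y should be close to exp(-1) ~= 0.3679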
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions useful when writing scripts that integrate with GN.
The main functions are ToGNString and FromGNString, which convert between
serialized GN variables and Python variables.
To use in a random python file in the build:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "build"))
import gn_helpers
Where the sequence of parameters to join is the relative path from your source
file to the build directory."""
class GNException(Exception):
pass
def ToGNString(value, allow_dicts = True):
"""Prints the given value to stdout.
allow_dicts indicates if this function will allow converting dictionaries
to GN scopes. This is only possible at the top level, you can't nest a
GN scope in a list, so this should be set to False for recursive calls."""
if isinstance(value, str):
if value.find('\n') >= 0:
raise GNException("Trying to print a string with a newline in it.")
return '"' + \
value.replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$') + \
'"'
if isinstance(value, unicode):
return ToGNString(value.encode('utf-8'))
if isinstance(value, bool):
if value:
return "true"
return "false"
if isinstance(value, list):
return '[ %s ]' % ', '.join(ToGNString(v) for v in value)
if isinstance(value, dict):
if not allow_dicts:
raise GNException("Attempting to recursively print a dictionary.")
result = ""
for key in value:
if not isinstance(key, str):
raise GNException("Dictionary key is not a string.")
result += "%s = %s\n" % (key, ToGNString(value[key], False))
return result
if isinstance(value, int):
return str(value)
raise GNException("Unsupported type when printing to GN.")
def FromGNString(input):
"""Converts the input string from a GN serialized value to Python values.
For details on supported types see GNValueParser.Parse() below.
If your GN script did:
something = [ "file1", "file2" ]
args = [ "--values=$something" ]
The command line would look something like:
--values="[ \"file1\", \"file2\" ]"
Which when interpreted as a command line gives the value:
[ "file1", "file2" ]
You can parse this into a Python list using GN rules with:
    input_values = FromGNString(options.values)
Although the Python 'ast' module will parse many forms of such input, it
will not handle GN escaping properly, nor GN booleans. You should use this
function instead.
A NOTE ON STRING HANDLING:
If you just pass a string on the command line to your Python script, or use
string interpolation on a string variable, the strings will not be quoted:
str = "asdf"
args = [ str, "--value=$str" ]
Will yield the command line:
asdf --value=asdf
The unquoted asdf string will not be valid input to this function, which
accepts only quoted strings like GN scripts. In such cases, you can just use
the Python string literal directly.
  The main use case for this is other types, in particular lists. When
using string interpolation on a list (as in the top example) the embedded
strings will be quoted and escaped according to GN rules so the list can be
re-parsed to get the same result."""
parser = GNValueParser(input)
return parser.Parse()
def FromGNArgs(input):
"""Converts a string with a bunch of gn arg assignments into a Python dict.
Given a whitespace-separated list of
<ident> = (integer | string | boolean | <list of the former>)
gn assignments, this returns a Python dict, i.e.:
FromGNArgs("foo=true\nbar=1\n") -> { 'foo': True, 'bar': 1 }.
  Only simple types and lists are supported; variables, structs, calls,
  and other, more complicated things are not.
This routine is meant to handle only the simple sorts of values that
arise in parsing --args.
"""
parser = GNValueParser(input)
return parser.ParseArgs()
def UnescapeGNString(value):
"""Given a string with GN escaping, returns the unescaped string.
Be careful not to feed with input from a Python parsing function like
'ast' because it will do Python unescaping, which will be incorrect when
fed into the GN unescaper."""
result = ''
i = 0
while i < len(value):
if value[i] == '\\':
if i < len(value) - 1:
next_char = value[i + 1]
if next_char in ('$', '"', '\\'):
# These are the escaped characters GN supports.
result += next_char
i += 1
else:
# Any other backslash is a literal.
result += '\\'
else:
result += value[i]
i += 1
return result
def _IsDigitOrMinus(char):
return char in "-0123456789"
class GNValueParser(object):
"""Duplicates GN parsing of values and converts to Python types.
Normally you would use the wrapper function FromGNValue() below.
If you expect input as a specific type, you can also call one of the Parse*
functions directly. All functions throw GNException on invalid input. """
def __init__(self, string):
self.input = string
self.cur = 0
def IsDone(self):
return self.cur == len(self.input)
def ConsumeWhitespace(self):
while not self.IsDone() and self.input[self.cur] in ' \t\n':
self.cur += 1
def Parse(self):
"""Converts a string representing a printed GN value to the Python type.
See additional usage notes on FromGNString above.
- GN booleans ('true', 'false') will be converted to Python booleans.
- GN numbers ('123') will be converted to Python numbers.
- GN strings (double-quoted as in '"asdf"') will be converted to Python
strings with GN escaping rules. GN string interpolation (embedded
      variables preceded by $) is not supported; such values are returned as
      literals.
- GN lists ('[1, "asdf", 3]') will be converted to Python lists.
- GN scopes ('{ ... }') are not supported."""
result = self._ParseAllowTrailing()
self.ConsumeWhitespace()
if not self.IsDone():
raise GNException("Trailing input after parsing:\n " +
self.input[self.cur:])
return result
def ParseArgs(self):
"""Converts a whitespace-separated list of ident=literals to a dict.
See additional usage notes on FromGNArgs, above.
"""
d = {}
self.ConsumeWhitespace()
while not self.IsDone():
ident = self._ParseIdent()
self.ConsumeWhitespace()
if self.input[self.cur] != '=':
raise GNException("Unexpected token: " + self.input[self.cur:])
self.cur += 1
self.ConsumeWhitespace()
val = self._ParseAllowTrailing()
self.ConsumeWhitespace()
d[ident] = val
return d
def _ParseAllowTrailing(self):
"""Internal version of Parse that doesn't check for trailing stuff."""
self.ConsumeWhitespace()
if self.IsDone():
raise GNException("Expected input to parse.")
next_char = self.input[self.cur]
if next_char == '[':
return self.ParseList()
elif _IsDigitOrMinus(next_char):
return self.ParseNumber()
elif next_char == '"':
return self.ParseString()
elif self._ConstantFollows('true'):
return True
elif self._ConstantFollows('false'):
return False
else:
raise GNException("Unexpected token: " + self.input[self.cur:])
def _ParseIdent(self):
id = ''
next_char = self.input[self.cur]
if not next_char.isalpha() and not next_char=='_':
raise GNException("Expected an identifier: " + self.input[self.cur:])
id += next_char
self.cur += 1
next_char = self.input[self.cur]
while next_char.isalpha() or next_char.isdigit() or next_char=='_':
id += next_char
self.cur += 1
next_char = self.input[self.cur]
return id
def ParseNumber(self):
self.ConsumeWhitespace()
if self.IsDone():
raise GNException('Expected number but got nothing.')
begin = self.cur
# The first character can include a negative sign.
if not self.IsDone() and _IsDigitOrMinus(self.input[self.cur]):
self.cur += 1
while not self.IsDone() and self.input[self.cur].isdigit():
self.cur += 1
number_string = self.input[begin:self.cur]
if not len(number_string) or number_string == '-':
raise GNException("Not a valid number.")
return int(number_string)
def ParseString(self):
self.ConsumeWhitespace()
if self.IsDone():
raise GNException('Expected string but got nothing.')
if self.input[self.cur] != '"':
raise GNException('Expected string beginning in a " but got:\n ' +
self.input[self.cur:])
self.cur += 1 # Skip over quote.
begin = self.cur
while not self.IsDone() and self.input[self.cur] != '"':
if self.input[self.cur] == '\\':
self.cur += 1 # Skip over the backslash.
if self.IsDone():
raise GNException("String ends in a backslash in:\n " +
self.input)
self.cur += 1
if self.IsDone():
raise GNException('Unterminated string:\n ' + self.input[begin:])
end = self.cur
self.cur += 1 # Consume trailing ".
return UnescapeGNString(self.input[begin:end])
def ParseList(self):
self.ConsumeWhitespace()
if self.IsDone():
raise GNException('Expected list but got nothing.')
# Skip over opening '['.
if self.input[self.cur] != '[':
raise GNException("Expected [ for list but got:\n " +
self.input[self.cur:])
self.cur += 1
self.ConsumeWhitespace()
if self.IsDone():
raise GNException("Unterminated list:\n " + self.input)
list_result = []
previous_had_trailing_comma = True
while not self.IsDone():
if self.input[self.cur] == ']':
self.cur += 1 # Skip over ']'.
return list_result
if not previous_had_trailing_comma:
raise GNException("List items not separated by comma.")
list_result += [ self._ParseAllowTrailing() ]
self.ConsumeWhitespace()
if self.IsDone():
break
# Consume comma if there is one.
previous_had_trailing_comma = self.input[self.cur] == ','
if previous_had_trailing_comma:
# Consume comma.
self.cur += 1
self.ConsumeWhitespace()
raise GNException("Unterminated list:\n " + self.input)
def _ConstantFollows(self, constant):
"""Returns true if the given constant follows immediately at the current
location in the input. If it does, the text is consumed and the function
returns true. Otherwise, returns false and the current position is
unchanged."""
end = self.cur + len(constant)
if end > len(self.input):
return False # Not enough room.
if self.input[self.cur:end] == constant:
self.cur = end
return True
return False
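# Minimal usage sketch (illustrative only; run as a script): round-trip a value
# through ToGNString/FromGNString and parse a simple --args blob with FromGNArgs.
if __name__ == '__main__':
  serialized = ToGNString(["file1", "file2"])
  print(serialized)                # [ "file1", "file2" ]
  print(FromGNString(serialized))  # ['file1', 'file2']
  print(FromGNArgs('foo=true\nbar=1\nname="a b"\n'))
  # -> {'foo': True, 'bar': 1, 'name': 'a b'}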
|
|
from __future__ import print_function, division
import inspect
from sympy.core.cache import cacheit
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import Boolean
from sympy.utilities.source import get_class
from contextlib import contextmanager
class AssumptionsContext(set):
"""Set representing assumptions.
This is used to represent global assumptions, but you can also use this
class to create your own local assumptions contexts. It is basically a thin
    wrapper around Python's set, so see its documentation for advanced usage.
Examples
========
>>> from sympy import AppliedPredicate, Q
>>> from sympy.assumptions.assume import global_assumptions
>>> global_assumptions
AssumptionsContext()
>>> from sympy.abc import x
>>> global_assumptions.add(Q.real(x))
>>> global_assumptions
AssumptionsContext([Q.real(x)])
>>> global_assumptions.remove(Q.real(x))
>>> global_assumptions
AssumptionsContext()
>>> global_assumptions.clear()
"""
def add(self, *assumptions):
"""Add an assumption."""
for a in assumptions:
super(AssumptionsContext, self).add(a)
global_assumptions = AssumptionsContext()
class AppliedPredicate(Boolean):
"""The class of expressions resulting from applying a Predicate.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> Q.integer(x)
Q.integer(x)
>>> type(Q.integer(x))
<class 'sympy.assumptions.assume.AppliedPredicate'>
"""
__slots__ = []
def __new__(cls, predicate, arg):
if not isinstance(arg, bool):
# XXX: There is not yet a Basic type for True and False
arg = _sympify(arg)
return Boolean.__new__(cls, predicate, arg)
is_Atom = True # do not attempt to decompose this
@property
def arg(self):
"""
Return the expression used by this assumption.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> a = Q.integer(x + 1)
>>> a.arg
x + 1
"""
return self._args[1]
@property
def args(self):
return self._args[1:]
@property
def func(self):
return self._args[0]
@cacheit
def sort_key(self, order=None):
return self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One
def __eq__(self, other):
if type(other) is AppliedPredicate:
return self._args == other._args
return False
def __hash__(self):
return super(AppliedPredicate, self).__hash__()
def _eval_ask(self, assumptions):
return self.func.eval(self.arg, assumptions)
class Predicate(Boolean):
"""A predicate is a function that returns a boolean value.
Predicates merely wrap their argument and remain unevaluated:
>>> from sympy import Q, ask, Symbol, S
>>> x = Symbol('x')
>>> Q.prime(7)
Q.prime(7)
To obtain the truth value of an expression containing predicates, use
the function `ask`:
>>> ask(Q.prime(7))
True
The tautological predicate `Q.is_true` can be used to wrap other objects:
>>> Q.is_true(x > 1)
Q.is_true(x > 1)
>>> Q.is_true(S(1) < x)
Q.is_true(1 < x)
"""
is_Atom = True
def __new__(cls, name, handlers=None):
obj = Boolean.__new__(cls)
obj.name = name
obj.handlers = handlers or []
return obj
def _hashable_content(self):
return (self.name,)
def __getnewargs__(self):
return (self.name,)
def __call__(self, expr):
return AppliedPredicate(self, expr)
def add_handler(self, handler):
self.handlers.append(handler)
def remove_handler(self, handler):
self.handlers.remove(handler)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One
def eval(self, expr, assumptions=True):
"""
Evaluate self(expr) under the given assumptions.
This uses only direct resolution methods, not logical inference.
"""
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
try:
eval = getattr(cls, subclass.__name__)
except AttributeError:
continue
res = eval(expr, assumptions)
                    # Do not stop if the returned value is None;
                    # keep checking handlers for classes higher in the MRO
if res is None:
continue
if _res is None:
_res = res
elif res is None:
# since first resolutor was conclusive, we keep that value
res = _res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
@contextmanager
def assuming(*assumptions):
""" Context manager for assumptions
Examples
========
>>> from sympy.assumptions import assuming, Q, ask
>>> from sympy.abc import x, y
>>> print(ask(Q.integer(x + y)))
None
>>> with assuming(Q.integer(x), Q.integer(y)):
... print(ask(Q.integer(x + y)))
True
"""
old_global_assumptions = global_assumptions.copy()
global_assumptions.update(assumptions)
try:
yield
finally:
global_assumptions.clear()
global_assumptions.update(old_global_assumptions)
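# Minimal usage sketch of the API defined in this module (run as a script only;
# it assumes a full SymPy installation and imports the public names from the
# installed package so that ask() and assuming() share the same global context):
if __name__ == '__main__':
    from sympy.assumptions import assuming, Q, ask
    from sympy.assumptions.assume import global_assumptions
    from sympy.abc import x, y

    print(ask(Q.positive(x + y)))          # None: nothing is known about x or y
    with assuming(Q.positive(x), Q.positive(y)):
        print(ask(Q.positive(x + y)))      # True while the assumptions are active
    print(global_assumptions)              # AssumptionsContext() -- restored afterwards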
|
|
import logging
import json
import datetime
import pytz
from rdr_service.lib_fhir.fhirclient_1_0_6.models import fhirdate
from rdr_service.lib_fhir.fhirclient_1_0_6.models.backboneelement import BackboneElement
from rdr_service.lib_fhir.fhirclient_1_0_6.models.domainresource import DomainResource
from rdr_service.lib_fhir.fhirclient_1_0_6.models.fhirdate import FHIRDate
from rdr_service.lib_fhir.fhirclient_1_0_6.models.identifier import Identifier
from rdr_service.lib_fhir.fhirclient_1_0_6.models.address import Address
from sqlalchemy import or_, cast, Date, and_
from sqlalchemy.orm import subqueryload, joinedload
from werkzeug.exceptions import BadRequest, Conflict, PreconditionFailed, ServiceUnavailable
from rdr_service.services.mayolink_client import MayoLinkClient, MayoLinkOrder, MayolinkQuestion, MayoLinkTest
from rdr_service import clock
from rdr_service.api_util import get_site_id_by_site_value as get_site, format_json_code
from rdr_service.app_util import get_account_origin_id
from rdr_service.code_constants import BIOBANK_TESTS_SET, HEALTHPRO_USERNAME_SYSTEM, SITE_ID_SYSTEM, \
QUEST_SITE_ID_SYSTEM, QUEST_BIOBANK_ORDER_ORIGIN, KIT_ID_SYSTEM, QUEST_USERNAME_SYSTEM
from rdr_service.dao.base_dao import FhirMixin, FhirProperty, UpdatableDao
from rdr_service.dao.participant_dao import ParticipantDao, raise_if_withdrawn
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.dao.code_dao import CodeDao
from rdr_service.model.biobank_order import (
BiobankOrder,
BiobankOrderHistory,
BiobankOrderIdentifier,
BiobankOrderIdentifierHistory,
BiobankOrderedSample,
BiobankOrderedSampleHistory,
MayolinkCreateOrderHistory,
BiobankQuestOrderSiteAddress
)
from rdr_service.model.log_position import LogPosition
from rdr_service.model.participant import Participant
from rdr_service.model.utils import to_client_participant_id
from rdr_service.participant_enums import BiobankOrderStatus, OrderStatus
from rdr_service.model.config_utils import to_client_biobank_id
from rdr_service.code_constants import UNMAPPED, UNSET
# Timezones for MayoLINK
_UTC = pytz.utc
_US_CENTRAL = pytz.timezone("US/Central")
FEDEX_TRACKING_NUMBER_URL = 'https://fedex.com/tracking-number'
def _ToFhirDate(dt):
if not dt:
return None
return FHIRDate.with_json(dt.isoformat())
class _FhirBiobankOrderNotes(FhirMixin, BackboneElement):
"""Notes sub-element."""
resource_name = "BiobankOrderNotes"
_PROPERTIES = [FhirProperty("collected", str), FhirProperty("processed", str), FhirProperty("finalized", str)]
class _FhirBiobankOrderedSample(FhirMixin, BackboneElement):
"""Sample sub-element."""
resource_name = "BiobankOrderedSample"
_PROPERTIES = [
FhirProperty("test", str, required=True),
FhirProperty("description", str, required=True),
FhirProperty("processing_required", bool, required=True),
FhirProperty("collected", fhirdate.FHIRDate),
FhirProperty("processed", fhirdate.FHIRDate),
FhirProperty("finalized", fhirdate.FHIRDate),
]
class _FhirBiobankOrderHandlingInfo(FhirMixin, BackboneElement):
"""Information about what user and site handled an order."""
resource_name = "BiobankOrderHandlingInfo"
_PROPERTIES = [FhirProperty("author", Identifier), FhirProperty("site", Identifier),
FhirProperty("address", Address)]
class _FhirBiobankOrder(FhirMixin, DomainResource):
"""FHIR client definition of the expected JSON structure for a BiobankOrder resource."""
resource_name = "BiobankOrder"
_PROPERTIES = [
FhirProperty("subject", str, required=True),
FhirProperty("identifier", Identifier, is_list=True, required=True),
FhirProperty("created", fhirdate.FHIRDate, required=True),
FhirProperty("samples", _FhirBiobankOrderedSample, is_list=True, required=True),
FhirProperty("notes", _FhirBiobankOrderNotes),
FhirProperty("created_info", _FhirBiobankOrderHandlingInfo),
FhirProperty("collected_info", _FhirBiobankOrderHandlingInfo),
FhirProperty("processed_info", _FhirBiobankOrderHandlingInfo),
FhirProperty("finalized_info", _FhirBiobankOrderHandlingInfo),
FhirProperty("cancelledInfo", _FhirBiobankOrderHandlingInfo),
FhirProperty("restoredInfo", _FhirBiobankOrderHandlingInfo),
FhirProperty("restoredSiteId", int, required=False),
FhirProperty("restoredUsername", str, required=False),
FhirProperty("amendedInfo", _FhirBiobankOrderHandlingInfo),
FhirProperty("version", int, required=False),
FhirProperty("status", str, required=False),
FhirProperty("amendedReason", str, required=False),
FhirProperty("origin", str, required=False)
]
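# For illustration only, the required properties above imply a request payload
# shaped roughly like the following (every value is hypothetical; identifier
# systems and exact key casing are determined by FhirProperty and the callers):
#
#   {
#     "subject": "Patient/P123456789",
#     "identifier": [{"system": "<main-id-system>", "value": "<order-id>"}],
#     "created": "2020-01-01T12:00:00Z",
#     "samples": [{"test": "<biobank-test-code>", "description": "<text>",
#                  "processing_required": false,
#                  "collected": "2020-01-01T12:10:00Z"}],
#     "created_info": {"author": {"system": "<username-system>", "value": "<user>"},
#                      "site": {"system": "<site-id-system>", "value": "<site-group>"}}
#   }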
class BiobankOrderDao(UpdatableDao):
def __init__(self):
super(BiobankOrderDao, self).__init__(BiobankOrder)
def get_id(self, obj):
return obj.biobankOrderId
def _order_as_dict(self, order):
result = order.asdict(follow={"identifiers": {}, "samples": {}})
result["version"] = int(result["version"])
if result["orderStatus"] is None:
result["orderStatus"] = BiobankOrderStatus.UNSET
del result["created"]
del result["logPositionId"]
for identifier in result.get("identifiers", []):
del identifier["biobankOrderId"]
samples = result.get("samples")
if samples:
for sample in samples:
del sample["biobankOrderId"]
return result
def insert_with_session(self, session, obj):
obj.version = 1
if obj.logPosition is not None:
raise BadRequest(f"{self.model_type.__name__}.logPosition must be auto-generated.")
obj.logPosition = LogPosition()
if obj.biobankOrderId is None:
raise BadRequest("Client must supply biobankOrderId.")
existing_order = self.get_with_children_in_session(session, obj.biobankOrderId)
if existing_order:
existing_order_dict = self._order_as_dict(existing_order)
new_dict = self._order_as_dict(obj)
if existing_order_dict == new_dict:
                # If a matching order already exists, just return it without trying to create it again.
return existing_order
else:
raise Conflict(f"Order with ID {obj.biobankOrderId} already exists")
self._update_participant_summary(session, obj)
inserted_obj = super(BiobankOrderDao, self).insert_with_session(session, obj)
if inserted_obj.collectedSiteId is not None:
ParticipantDao().add_missing_hpo_from_site(
session, inserted_obj.participantId, inserted_obj.collectedSiteId
)
self._update_history(session, obj)
return inserted_obj
def handle_list_queries(self, **kwargs):
participant_id = kwargs.get('participant_id')
kit_id = kwargs.get('kit_id')
state = kwargs.get('state')
city = kwargs.get('city')
zip_code = kwargs.get('zip_code')
start_date = kwargs.get('start_date')
end_date = kwargs.get('end_date')
origin = kwargs.get('origin') if kwargs.get('origin') else QUEST_BIOBANK_ORDER_ORIGIN
page = kwargs.get('page') if kwargs.get('page') else 1
page_size = kwargs.get('page_size') if kwargs.get('page_size') else 10
if participant_id:
            # return all biobank orders for this participant id
items = self.get_biobank_orders_with_children_for_participant(participant_id)
result = {'data': [], 'total': len(items)}
for item in items:
response_json = self.to_client_json(item)
result['data'].append(response_json)
return result
elif kit_id:
            # return all biobank orders for this kit id
items = self.get_biobank_order_by_kit_id(kit_id)
result = {'data': [], 'total': len(items)}
for item in items:
response_json = self.to_client_json(item)
result['data'].append(response_json)
return result
else:
total, items = self.get_biobank_order_by_search_criteria(start_date, end_date, state, city, zip_code,
int(page), int(page_size), origin)
result = {
"total": total,
"page": int(page),
"pageSize": int(page_size),
"startDate": start_date,
"endDate": end_date,
"origin": origin,
"state": state,
"city": city,
"zipCode": zip_code,
"data": []
}
for item in items:
response_json = self.to_client_json(item[0])
response_json['biobankId'] = str(to_client_biobank_id(item[1]))
result['data'].append(response_json)
return result
def get_biobank_order_by_search_criteria(self, start_date, end_date, state, city, zip_code, page, page_size,
origin):
states = state.split(',') if state else []
cities = city.split(',') if city else []
zip_codes = zip_code.split(',') if zip_code else []
date_format = "%Y-%m-%d"
offset = (page - 1) * page_size
if offset < 0:
raise BadRequest("invalid parameter: page")
if start_date:
try:
start_date = datetime.datetime.strptime(start_date, date_format).date()
except ValueError:
raise BadRequest("Invalid start date: {}".format(start_date))
if end_date:
try:
end_date = datetime.datetime.strptime(end_date, date_format).date()
except ValueError:
raise BadRequest("Invalid end date: {}".format(end_date))
with self.session() as session:
total_query = session.query(BiobankOrder)\
.outerjoin(BiobankQuestOrderSiteAddress,
BiobankQuestOrderSiteAddress.biobankOrderId == BiobankOrder.biobankOrderId)
total_query = self._add_filter_for_biobank_order_search(total_query, states, cities, zip_codes, origin,
start_date, end_date)
total = total_query.count()
query = session.query(BiobankOrder, Participant.biobankId)\
.options(joinedload(BiobankOrder.identifiers), joinedload(BiobankOrder.samples),
joinedload(BiobankOrder.questSiteAddress))\
.join(Participant, Participant.participantId == BiobankOrder.participantId) \
.outerjoin(BiobankQuestOrderSiteAddress,
BiobankQuestOrderSiteAddress.biobankOrderId == BiobankOrder.biobankOrderId)
query = self._add_filter_for_biobank_order_search(query, states, cities, zip_codes, origin, start_date,
end_date)
query = query.order_by(BiobankOrder.created).limit(page_size).offset(offset)
items = query.all()
return total, items
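    # Example: page=3, page_size=10 gives offset = (3 - 1) * 10 = 20, i.e. the
    # query above returns rows 21-30 of the filtered set ordered by created.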
def _add_filter_for_biobank_order_search(self, query, states, cities, zip_codes, origin, start_date, end_date):
if states:
query = query.filter(BiobankQuestOrderSiteAddress.state.in_(states))
if cities:
query = query.filter(BiobankQuestOrderSiteAddress.city.in_(cities))
if zip_codes:
query = query.filter(BiobankQuestOrderSiteAddress.zipCode.in_(zip_codes))
if origin:
query = query.filter(BiobankOrder.orderOrigin == origin)
if start_date and end_date:
query = query.filter(
or_(
and_(
cast(BiobankOrder.created, Date) >= start_date,
cast(BiobankOrder.created, Date) <= end_date
),
and_(
cast(BiobankOrder.finalizedTime, Date) >= start_date,
cast(BiobankOrder.finalizedTime, Date) <= end_date
)
),
)
return query
def _validate_model(self, session, obj):
if obj.participantId is None:
raise BadRequest("participantId is required")
participant_summary = ParticipantSummaryDao().get_with_session(session, obj.participantId)
if not participant_summary:
raise BadRequest(f"Can't submit order for participant {obj.participantId} without consent")
raise_if_withdrawn(participant_summary)
for sample in obj.samples:
self._validate_order_sample(sample)
# TODO(mwf) FHIR validation for identifiers?
# Verify that no identifier is in use by another order.
for identifier in obj.identifiers:
if identifier.system != FEDEX_TRACKING_NUMBER_URL: # skip the check for fedex tracking numbers
for existing in (
session.query(BiobankOrderIdentifier).filter(
BiobankOrderIdentifier.system == identifier.system,
BiobankOrderIdentifier.value == identifier.value,
BiobankOrderIdentifier.biobankOrderId != obj.biobankOrderId
)
):
raise BadRequest(f"Identifier {identifier} is already in use by order {existing.biobankOrderId}")
def _validate_order_sample(self, sample):
# TODO(mwf) Make use of FHIR validation?
if sample.test not in BIOBANK_TESTS_SET:
raise BadRequest(f"Invalid test value {sample.test} not in {BIOBANK_TESTS_SET}.")
def get_with_session(self, session, obj_id, **kwargs):
result = super(BiobankOrderDao, self).get_with_session(session, obj_id, **kwargs)
if result:
ParticipantDao().validate_participant_reference(session, result)
return result
def get_with_children_in_session(self, session, obj_id, for_update=False):
query = session.query(BiobankOrder).options(
subqueryload(BiobankOrder.identifiers), subqueryload(BiobankOrder.samples),
subqueryload(BiobankOrder.questSiteAddress)
)
if for_update:
query = query.with_for_update()
existing_obj = query.get(obj_id)
return existing_obj
def get_with_children(self, obj_id):
with self.session() as session:
return self.get_with_children_in_session(session, obj_id)
def get_biobank_orders_for_participant(self, pid):
"""Retrieves all ordered samples for a participant."""
with self.session() as session:
return session.query(BiobankOrder).filter(BiobankOrder.participantId == pid).all()
def get_biobank_orders_with_children_for_participant(self, pid):
"""Retrieves all ordered with children for a participant."""
if pid is None:
raise BadRequest("invalid participant id")
with self.session() as session:
return session.query(BiobankOrder).\
options(subqueryload(BiobankOrder.identifiers), subqueryload(BiobankOrder.samples),
subqueryload(BiobankOrder.questSiteAddress)).\
filter(BiobankOrder.participantId == pid).all()
def get_biobank_order_by_kit_id(self, kit_id):
if kit_id is None:
raise BadRequest("invalid kit id")
with self.session() as session:
return (session.query(BiobankOrder).
join(BiobankOrderIdentifier).
options(subqueryload(BiobankOrder.identifiers), subqueryload(BiobankOrder.samples),
subqueryload(BiobankOrder.questSiteAddress)).
filter(BiobankOrder.biobankOrderId == BiobankOrderIdentifier.biobankOrderId,
BiobankOrderIdentifier.system == KIT_ID_SYSTEM,
BiobankOrderIdentifier.value == kit_id)
.all()
)
def get_ordered_samples_for_participant(self, participant_id):
"""Retrieves all ordered samples for a participant."""
with self.session() as session:
return (
session.query(BiobankOrderedSample)
.join(BiobankOrder)
.filter(BiobankOrder.participantId == participant_id)
.all()
)
def get_ordered_samples_sample(self, session, percentage, batch_size):
"""
Retrieves the biobank ID, collected time, and test for a percentage of ordered samples.
Used in fake data generation.
"""
return (
session.query(Participant.biobankId, BiobankOrderedSample.collected, BiobankOrderedSample.test)
.join(BiobankOrder, Participant.participantId == BiobankOrder.participantId)
.join(BiobankOrderedSample, BiobankOrder.biobankOrderId == BiobankOrderedSample.biobankOrderId)
.filter(Participant.biobankId % 100 < percentage * 100)
.yield_per(batch_size)
)
def insert_mayolink_create_order_history(self, mayolink_create_order_history):
with self.session() as session:
self.insert_mayolink_create_order_history_with_session(session, mayolink_create_order_history)
def insert_mayolink_create_order_history_with_session(self, session, mayolink_create_order_history):
session.add(mayolink_create_order_history)
def _get_order_status_and_time(self, sample, order):
if sample.finalized:
return (OrderStatus.FINALIZED, sample.finalized)
if sample.processed:
return (OrderStatus.PROCESSED, sample.processed)
if sample.collected:
return (OrderStatus.COLLECTED, sample.collected)
return (OrderStatus.CREATED, order.created)
def _update_participant_summary(self, session, obj):
""" called on insert"""
participant_summary_dao = ParticipantSummaryDao()
participant_summary = participant_summary_dao.get_for_update(session, obj.participantId)
if not participant_summary:
raise BadRequest(f"Can't submit biospecimens for participant {obj.participantId} without consent")
raise_if_withdrawn(participant_summary)
self._set_participant_summary_fields(obj, participant_summary)
participant_summary_dao.update_enrollment_status(participant_summary)
finalized_time = self.get_random_sample_finalized_time(obj)
is_distinct_visit = ParticipantSummaryDao().calculate_distinct_visits(
participant_summary.participantId, finalized_time, obj.biobankOrderId
)
if is_distinct_visit:
participant_summary.numberDistinctVisits += 1
def get_random_sample_finalized_time(self, obj):
"""all samples are set to same finalized time in an order, we only need one."""
for sample in obj.samples:
if sample.finalized is not None:
return sample.finalized
def _set_participant_summary_fields(self, obj, participant_summary):
participant_summary.biospecimenStatus = OrderStatus.FINALIZED
participant_summary.biospecimenOrderTime = obj.created
if obj.sourceSiteId or obj.collectedSiteId or obj.processedSiteId or obj.finalizedSiteId:
participant_summary.biospecimenSourceSiteId = obj.sourceSiteId
participant_summary.biospecimenCollectedSiteId = obj.collectedSiteId
participant_summary.biospecimenProcessedSiteId = obj.processedSiteId
participant_summary.biospecimenFinalizedSiteId = obj.finalizedSiteId
participant_summary.lastModified = clock.CLOCK.now()
for sample in obj.samples:
status_field = "sampleOrderStatus" + sample.test
status, time = self._get_order_status_and_time(sample, obj)
setattr(participant_summary, status_field, status)
setattr(participant_summary, status_field + "Time", time)
def _get_non_cancelled_biobank_orders(self, session, participantId):
        # look up all orders that do not have a cancelled status, ordered by created time
return (
session.query(BiobankOrder)
.filter(BiobankOrder.participantId == participantId)
.filter(or_(BiobankOrder.orderStatus != BiobankOrderStatus.CANCELLED, BiobankOrder.orderStatus == None))
.order_by(BiobankOrder.created)
.all()
)
def _refresh_participant_summary(self, session, obj):
# called when cancelled/restored/amended
participant_summary_dao = ParticipantSummaryDao()
participant_summary = participant_summary_dao.get_for_update(session, obj.participantId)
non_cancelled_orders = self._get_non_cancelled_biobank_orders(session, obj.participantId)
participant_summary.biospecimenStatus = OrderStatus.UNSET
participant_summary.biospecimenOrderTime = None
participant_summary.biospecimenSourceSiteId = None
participant_summary.biospecimenCollectedSiteId = None
participant_summary.biospecimenProcessedSiteId = None
participant_summary.biospecimenFinalizedSiteId = None
amendment = False
if obj.orderStatus == BiobankOrderStatus.AMENDED:
amendment = True
finalized_time = self.get_random_sample_finalized_time(obj)
is_distinct_visit = ParticipantSummaryDao().calculate_distinct_visits(
participant_summary.participantId, finalized_time, obj.biobankOrderId, amendment
)
if is_distinct_visit and obj.orderStatus != BiobankOrderStatus.CANCELLED:
participant_summary.numberDistinctVisits += 1
if (
obj.orderStatus == BiobankOrderStatus.CANCELLED
and participant_summary.numberDistinctVisits > 0
and is_distinct_visit
):
participant_summary.numberDistinctVisits -= 1
participant_summary.lastModified = clock.CLOCK.now()
for sample in obj.samples:
status_field = "sampleOrderStatus" + sample.test
setattr(participant_summary, status_field, OrderStatus.UNSET)
setattr(participant_summary, status_field + "Time", None)
if len(non_cancelled_orders) > 0:
for order in non_cancelled_orders:
self._set_participant_summary_fields(order, participant_summary)
participant_summary_dao.update_enrollment_status(participant_summary)
def _parse_handling_info(self, handling_info):
site_id = None
username = None
if handling_info.site:
if handling_info.site.system in [SITE_ID_SYSTEM, QUEST_SITE_ID_SYSTEM]:
site = SiteDao().get_by_google_group(handling_info.site.value)
if not site:
raise BadRequest(f"Unrecognized site: {handling_info.site.value}")
site_id = site.siteId
else:
raise BadRequest(f"Invalid site system: {handling_info.site.system}")
if handling_info.author:
if handling_info.author.system in [QUEST_USERNAME_SYSTEM, HEALTHPRO_USERNAME_SYSTEM]:
username = handling_info.author.value
else:
raise BadRequest(f"Invalid author system: {handling_info.author.system}")
return username, site_id
def _to_handling_info(self, username, site_id, address=None):
if not username and not site_id:
return None
info = _FhirBiobankOrderHandlingInfo()
if site_id:
site = SiteDao().get(site_id)
info.site = Identifier()
info.site.system = SITE_ID_SYSTEM
info.site.value = site.googleGroup
if username:
info.author = Identifier()
info.author.system = HEALTHPRO_USERNAME_SYSTEM
info.author.value = username
if address:
info.address = Address()
info.address.city = address.city
info.address.state = address.state
info.address.postalCode = address.zipCode
info.address.line = [address.address1, address.address2]
return info
# pylint: disable=unused-argument
def from_client_json(self, resource_json, id_=None, expected_version=None, participant_id=None, client_id=None):
resource = _FhirBiobankOrder(resource_json)
if not resource.created.date: # FHIR warns but does not error on bad date values.
raise BadRequest(f"Invalid created date {resource.created.origval}.")
order = BiobankOrder(participantId=participant_id, created=resource.created.date.replace(tzinfo=None))
order.orderOrigin = get_account_origin_id()
if not resource.created_info:
raise BadRequest("Created Info is required, but was missing in request.")
order.sourceUsername, order.sourceSiteId = self._parse_handling_info(resource.created_info)
order.collectedUsername, order.collectedSiteId = self._parse_handling_info(resource.collected_info)
if order.collectedSiteId is None:
raise BadRequest("Collected site is required in request.")
order.processedUsername, order.processedSiteId = self._parse_handling_info(resource.processed_info)
order.finalizedUsername, order.finalizedSiteId = self._parse_handling_info(resource.finalized_info)
if resource.notes:
order.collectedNote = resource.notes.collected
order.processedNote = resource.notes.processed
order.finalizedNote = resource.notes.finalized
if resource.subject != self._participant_id_to_subject(participant_id):
raise BadRequest(
f"Participant ID {participant_id} from path and {resource.subject} \
in request do not match, should be {self._participant_id_to_subject(participant_id)}."
)
biobank_order_id = id_
        # if id_ is not None, this is an update, so there is no need to create a new MayoLink order
if order.orderOrigin == QUEST_BIOBANK_ORDER_ORIGIN and id_ is None:
biobank_order_id = self._make_mayolink_order(participant_id, resource)
order.biobankOrderId = biobank_order_id
self._add_quest_site_address(order, resource)
self._add_identifiers_and_main_id(order, resource, biobank_order_id)
self._add_samples(order, resource)
# order.finalizedTime uses the time from biobank_ordered_sample.finalized
try:
order.finalizedTime = self.get_random_sample_finalized_time(resource).date.replace(tzinfo=None)
except AttributeError:
order.finalizedTime = None
if resource.amendedReason:
order.amendedReason = resource.amendedReason
if resource.amendedInfo:
order.amendedUsername, order.amendedSiteId = self._parse_handling_info(resource.amendedInfo)
order.version = expected_version
return order
def _make_mayolink_order(self, participant_id, resource):
mayo = MayoLinkClient()
summary = ParticipantSummaryDao().get(participant_id)
if not summary:
raise BadRequest("No summary for participant id: {}".format(participant_id))
code_dict = summary.asdict()
code_dao = CodeDao()
format_json_code(code_dict, code_dao, "genderIdentityId")
format_json_code(code_dict, code_dao, "stateId")
if "genderIdentity" in code_dict and code_dict["genderIdentity"]:
if code_dict["genderIdentity"] == "GenderIdentity_Woman":
gender_val = "F"
elif code_dict["genderIdentity"] == "GenderIdentity_Man":
gender_val = "M"
else:
gender_val = "U"
else:
gender_val = "U"
if not resource.samples:
raise BadRequest("No sample found in the payload")
kit_id = None
for item in resource.identifier:
if item.system == KIT_ID_SYSTEM:
kit_id = item.value
order = MayoLinkOrder(
collected_datetime_utc=resource.samples[0].collected.date,
number=kit_id,
biobank_id=summary.biobankId,
sex=gender_val,
address1=summary.streetAddress,
address2=summary.streetAddress2,
city=summary.city,
state=code_dict["state"][-2:] if code_dict["state"] not in (UNMAPPED, UNSET) else '',
postal_code=str(summary.zipCode),
phone=str(summary.phoneNumber),
race=str(summary.race),
tests=[]
)
test_codes = []
centrifuge_code_map = {
'1SS08': '1SSTP',
'1PS08': '1PSTP'
}
for sample in resource.samples:
test = MayoLinkTest(
code=sample.test,
name=sample.description
)
if sample.test in centrifuge_code_map:
test.questions = [MayolinkQuestion(
code=centrifuge_code_map[sample.test],
prompt=f'{centrifuge_code_map[sample.test][1:4]} Centrifuge Type',
answer='Swinging Bucket'
)]
order.tests.append(test)
test_codes.append(sample.test)
response = mayo.post(order)
try:
biobank_order_id = response["orders"]["order"]["number"]
mayo_order_status = response["orders"]["order"]["status"]
except KeyError:
raise ServiceUnavailable("Failed to get biobank order id from MayoLink API")
mayolink_create_order_history = MayolinkCreateOrderHistory()
mayolink_create_order_history.requestParticipantId = participant_id
mayolink_create_order_history.requestTestCode = ','.join(test_codes)
mayolink_create_order_history.requestOrderId = biobank_order_id
mayolink_create_order_history.requestOrderStatus = mayo_order_status
try:
mayolink_create_order_history.requestPayload = json.dumps(order)
mayolink_create_order_history.responsePayload = json.dumps(response)
except TypeError:
logging.info(f"TypeError when create mayolink_create_order_history")
self.insert_mayolink_create_order_history(mayolink_create_order_history)
return biobank_order_id
@classmethod
def _add_quest_site_address(cls, order, resource):
if resource.collected_info and resource.collected_info.address:
address = BiobankQuestOrderSiteAddress(city=resource.collected_info.address.city,
state=resource.collected_info.address.state,
zipCode=resource.collected_info.address.postalCode,
address1=resource.collected_info.address.line[0]
if resource.collected_info.address.line else None,
address2=resource.collected_info.address.line[1]
if len(resource.collected_info.address.line) > 1 else None)
order.questSiteAddress = address
else:
order.questSiteAddress = None
@classmethod
def _add_identifiers_and_main_id(cls, order, resource, biobank_order_id):
found_main_id = False
for i in resource.identifier:
order.identifiers.append(BiobankOrderIdentifier(system=i.system, value=i.value))
if i.system == BiobankOrder._MAIN_ID_SYSTEM:
order.biobankOrderId = i.value
found_main_id = True
if not found_main_id and biobank_order_id:
order.biobankOrderId = biobank_order_id
elif not found_main_id and biobank_order_id is None:
raise BadRequest(f"No identifier for system {BiobankOrder._MAIN_ID_SYSTEM}, required for primary key.")
@classmethod
def _add_samples(cls, order, resource):
all_tests = sorted([s.test for s in resource.samples])
if len(set(all_tests)) != len(all_tests):
raise BadRequest(f"Duplicate test in sample list for order: {all_tests}.")
for s in resource.samples:
order.samples.append(
BiobankOrderedSample(
biobankOrderId=order.biobankOrderId,
test=s.test,
description=s.description,
processingRequired=s.processing_required,
collected=s.collected and s.collected.date.replace(tzinfo=None),
processed=s.processed and s.processed.date.replace(tzinfo=None),
finalized=s.finalized and s.finalized.date.replace(tzinfo=None),
)
)
@classmethod
def _participant_id_to_subject(cls, participant_id):
return "Patient/%s" % to_client_participant_id(participant_id)
@classmethod
def _add_samples_to_resource(cls, resource, model):
resource.samples = []
for sample in model.samples:
client_sample = _FhirBiobankOrderedSample()
client_sample.test = sample.test
client_sample.description = sample.description
client_sample.processing_required = sample.processingRequired
client_sample.collected = _ToFhirDate(sample.collected)
client_sample.processed = _ToFhirDate(sample.processed)
client_sample.finalized = _ToFhirDate(sample.finalized)
resource.samples.append(client_sample)
@classmethod
def _add_identifiers_to_resource(cls, resource, model):
resource.identifier = []
for identifier in model.identifiers:
fhir_id = Identifier()
fhir_id.system = identifier.system
fhir_id.value = identifier.value
resource.identifier.append(fhir_id)
def to_client_json(self, model):
resource = _FhirBiobankOrder()
resource.subject = self._participant_id_to_subject(model.participantId)
resource.created = _ToFhirDate(model.created)
resource.notes = _FhirBiobankOrderNotes()
resource.notes.collected = model.collectedNote
resource.notes.processed = model.processedNote
resource.notes.finalized = model.finalizedNote
resource.source_site = Identifier()
resource.created_info = self._to_handling_info(model.sourceUsername, model.sourceSiteId)
resource.collected_info = self._to_handling_info(model.collectedUsername, model.collectedSiteId,
model.questSiteAddress)
resource.processed_info = self._to_handling_info(model.processedUsername, model.processedSiteId)
resource.finalized_info = self._to_handling_info(model.finalizedUsername, model.finalizedSiteId)
resource.amendedReason = model.amendedReason
resource.origin = model.orderOrigin
restored = getattr(model, "restoredSiteId")
if model.orderStatus == BiobankOrderStatus.CANCELLED:
resource.status = str(BiobankOrderStatus.CANCELLED)
resource.cancelledInfo = self._to_handling_info(model.cancelledUsername, model.cancelledSiteId)
elif restored:
resource.status = str(BiobankOrderStatus.UNSET)
resource.restoredInfo = self._to_handling_info(model.restoredUsername, model.restoredSiteId)
elif model.orderStatus == BiobankOrderStatus.AMENDED:
resource.status = str(BiobankOrderStatus.AMENDED)
resource.amendedInfo = self._to_handling_info(model.amendedUsername, model.amendedSiteId)
self._add_identifiers_to_resource(resource, model)
self._add_samples_to_resource(resource, model)
client_json = resource.as_json() # also validates required fields
client_json["id"] = model.biobankOrderId
del client_json["resourceType"]
return client_json
def _do_update(self, session, order, existing_obj):
if order.orderOrigin != existing_obj.orderOrigin:
raise BadRequest(f"Can not update biobank order which was created by other origin")
order.lastModified = clock.CLOCK.now()
order.biobankOrderId = existing_obj.biobankOrderId
order.orderStatus = BiobankOrderStatus.AMENDED
if hasattr(existing_obj, "amendedInfo") and existing_obj.amendedInfo.get("author") is not None:
order.amendedUsername = existing_obj.amendedInfo.get("author").get("value")
if hasattr(existing_obj, "amendedInfo"):
order.amendedSiteId = get_site(existing_obj.amendedInfo)
order.amendedTime = clock.CLOCK.now()
order.logPosition = LogPosition()
order.version += 1
# Ensure that if an order was previously cancelled/restored those columns are removed.
self._clear_cancelled_and_restored_fields(order)
super(BiobankOrderDao, self)._do_update(session, order, existing_obj)
session.add(order.logPosition)
self._refresh_participant_summary(session, order)
self._update_history(session, order)
def update_with_patch(self, id_, resource, expected_version):
"""creates an atomic patch request on an object. It will fail if the object
doesn't exist already, or if obj.version does not match the version of the existing object.
May modify the passed in object."""
with self.session() as session:
obj = self.get_with_children_in_session(session, id_, for_update=True)
return self._do_update_with_patch(session, obj, resource, expected_version)
def _do_update_with_patch(self, session, order, resource, expected_version):
self._validate_patch_update(order, resource, expected_version)
order.lastModified = clock.CLOCK.now()
order.logPosition = LogPosition()
order.version += 1
if resource["status"].lower() == "cancelled":
order.amendedReason = resource["amendedReason"]
order.cancelledUsername = resource["cancelledInfo"]["author"]["value"]
order.cancelledSiteId = get_site(resource["cancelledInfo"])
order.cancelledTime = clock.CLOCK.now()
order.orderStatus = BiobankOrderStatus.CANCELLED
elif resource["status"].lower() == "restored":
order.amendedReason = resource["amendedReason"]
order.restoredUsername = resource["restoredInfo"]["author"]["value"]
order.restoredSiteId = get_site(resource["restoredInfo"])
order.restoredTime = clock.CLOCK.now()
order.orderStatus = BiobankOrderStatus.UNSET
else:
raise BadRequest("status must be restored or cancelled for patch request.")
super(BiobankOrderDao, self)._do_update(session, order, resource)
self._update_history(session, order)
self._refresh_participant_summary(session, order)
return order
def _validate_patch_update(self, model, resource, expected_version):
if expected_version != model.version:
raise PreconditionFailed(
f"Expected version was {expected_version}; stored version was {model.version}"
)
required_cancelled_fields = ["amendedReason", "cancelledInfo", "status"]
required_restored_fields = ["amendedReason", "restoredInfo", "status"]
if "status" not in resource:
raise BadRequest("status of cancelled/restored is required")
if resource["status"] == "cancelled":
if model.orderStatus == BiobankOrderStatus.CANCELLED:
raise BadRequest("Can not cancel an order that is already cancelled.")
for field in required_cancelled_fields:
if field not in resource:
raise BadRequest(f"{field} is required for a cancelled biobank order")
if "site" not in resource["cancelledInfo"] or "author" not in resource["cancelledInfo"]:
raise BadRequest("author and site are required for cancelledInfo")
elif resource["status"] == "restored":
if model.orderStatus != BiobankOrderStatus.CANCELLED:
raise BadRequest("Can not restore an order that is not cancelled.")
for field in required_restored_fields:
if field not in resource:
raise BadRequest(f"{field} is required for a restored biobank order")
if "site" not in resource["restoredInfo"] or "author" not in resource["restoredInfo"]:
raise BadRequest("author and site are required for restoredInfo")
def _update_history(self, session, order):
# Increment the version and add a new history entry.
session.flush()
history = BiobankOrderHistory()
history.fromdict(order.asdict(follow=["logPosition"]), allow_pk=True)
history.logPositionId = order.logPosition.logPositionId
session.add(history)
self._update_identifier_history(session, order)
self._update_sample_history(session, order)
@staticmethod
def _update_identifier_history(session, order):
session.flush()
for identifier in order.identifiers:
history = BiobankOrderIdentifierHistory()
history.fromdict(identifier.asdict(), allow_pk=True)
history.version = order.version
history.biobankOrderId = order.biobankOrderId
session.add(history)
@staticmethod
def _update_sample_history(session, order):
session.flush()
for sample in order.samples:
history = BiobankOrderedSampleHistory()
history.fromdict(sample.asdict(), allow_pk=True)
history.version = order.version
history.biobankOrderId = order.biobankOrderId
session.add(history)
@staticmethod
def _clear_cancelled_and_restored_fields(order):
# pylint: disable=unused-argument
""" Just in case these fields have values, we don't want them in the most recent record for an
amendment, they will exist in history tables."""
order.restoredUsername = None
order.restoredTime = None
order.cancelledUsername = None
order.cancelledTime = None
order.restoredSiteId = None
order.cancelledSiteId = None
order.status = BiobankOrderStatus.UNSET
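# For reference, update_with_patch() above accepts cancel/restore payloads shaped
# like the following (values are hypothetical; see _validate_patch_update for the
# exact required fields):
#
#   {
#     "status": "cancelled",                      # or "restored"
#     "amendedReason": "<free-text reason>",
#     "cancelledInfo": {                          # "restoredInfo" when restoring
#       "author": {"system": "<username-system>", "value": "user@example.com"},
#       "site": {"system": "<site-id-system>", "value": "<site-google-group>"}
#     }
#   }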
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module baseFigure
Defines the AxisContainer and Axes classes, as well as the Legend class used
by the Axes.
"""
import OpenGL.GL as gl
from visvis.utils.pypoints import Pointset
#
import visvis as vv
from visvis.core import base
from visvis.core.base import DRAW_NORMAL, DRAW_FAST, DRAW_SHAPE, DRAW_SCREEN
from visvis.core.misc import Property, PropWithDraw, DrawAfter
from visvis.core.misc import Range, getColor, basestring
#
from visvis.core.baseWibjects import Box, DraggableBox
from visvis.core import cameras
from visvis.core.cameras import ortho
from visvis.text import Label
from visvis.core.line import Line
from visvis.core.axises import BaseAxis, CartesianAxis, PolarAxis2D
from visvis.core.light import Light
def _Screenshot():
""" _Screenshot()
Capture the screen as a numpy array to use it later.
Used by the object picker helper to determine which item is
under the mouse, and by the axes to buffer its content.
"""
gl.glReadBuffer(gl.GL_BACK)
xywh = gl.glGetIntegerv(gl.GL_VIEWPORT)
x,y,w,h = xywh[0], xywh[1], xywh[2], xywh[3]
# use floats to prevent strides etc. uint8 caused crash on qt backend.
im = gl.glReadPixels(x, y, w, h, gl.GL_RGB, gl.GL_FLOAT)
# reshape, flip, and store
im.shape = h,w,3
return im
class _BaseFigure(base.Wibject):
""" Abstract class that the BaseFigure inherits from. It solves
the mutual dependence of the Axes and BaseFigure classes.
"""
pass
class AxesContainer(Box):
""" AxesContainer(parent)
A simple container wibject class to contain one Axes instance.
    Each Axes is contained in an AxesContainer instance. By default
the axes position is expressed in pixel coordinates, while the
container's position is expressed in unit coordinates. This
enables advanced positioning of the Axes.
When there is one axes in a figure, the container position will
be "0,0,1,1". For subplots however, the containers are positioned
    to divide the figure into equal parts. The Axes instances themselves
    are positioned in pixels, such that when resizing, the margins for
    the tickmarks and labels remain equal.
The only correct way to create (and obtain a reference to)
an AxesContainer instance is to use:
* axes = vv.Axes(figure)
* container = axes.parent
This container is automatically destroyed once the axes is removed.
You can attach wibjects to an instance of this class, but note that
the container object is destroyed as soon as the axes is gone.
"""
def __init__(self, parent, *args, **kwargs):
# check that the parent is a Figure
if not isinstance(parent, _BaseFigure):
raise Exception("The given parent for an AxesContainer " +
"should be a Figure.")
# Init box
Box.__init__(self, parent, *args, **kwargs)
# Init position
self.position = 0,0,1,1
# Set properties
self.edgeWidth = 0
self.bgcolor = None
def GetAxes(self):
""" GetAxes()
Get the axes. Creates a new axes object if it has none.
"""
if self._children:
child = self._children[0]
if isinstance(child, Axes):
return child
return None
def _DrawTree(self, mode, *args, **kwargs):
""" _DrawTree(mode, *args, **kwargs)
Pass on, but Destroy itself if axes is gone.
"""
axes = self.GetAxes()
if axes:
# Draw normally
base.Wibject._DrawTree(self, mode, *args, **kwargs)
else:
self.Destroy()
class Axes(base.Wibject):
""" Axes(parent, axisClass=None)
An Axes instance represents the scene with a local coordinate system
in which wobjects can be drawn. It has various properties to influence
the appearance of the scene, such as aspect ratio and lighting.
To set the appearance of the axis (the thing that indicates x, y and z),
use the properties of the Axis instance. For example:
Axes.axis.showGrid = True
The cameraType determines how the data is visualized and how the user
can interact with the data.
The daspect property represents the aspect ratio of the data as a
three element tuple. The sign of the elements indicate dimensions
being flipped. (The function imshow() for example flips the
y-dimension). If daspectAuto is False, all dimensions are always
equally zoomed (The function imshow() sets this to False).
An Axes can be created with the function vv.subplot() or vv.gca().
"""
def __init__(self, parent, axisClass=None):
# check that the parent is a Figure or AxesContainer
if isinstance(parent, AxesContainer):
figure = parent.parent
elif isinstance(parent, _BaseFigure):
figure = parent
parent = AxesContainer(figure)
else:
raise Exception("The given parent for an Axes " +
"should be a Figure or AxesContainer.")
# call base __init__
base.Wibject.__init__(self, parent)
# objects in the scene. The Axes is the only wibject that
# can contain wobjects. Basically, the Axes is the root
# for all the wobjects in it.
self._wobjects = []
# data aspect ratio. If daspectAuto is True, the values
# of daspect are ignored (only the sign is taken into account)
self._daspect = (1.0,1.0,1.0)
self._daspectAuto = None # None is like False, but means not being set
# screenshot buffer and variable to indicate whether we can use it
self._screenshot = None
self._isdirty = True
self._motionBlur = 0.0
self._useBuffer = True
        # variable to keep track of the position correction to fit labels
self._xCorr, self._yCorr = 0, 0
        # create cameras and select 3D as the default
self._cameras = {}
self.camera = cameras.TwoDCamera()
self.camera = cameras.ThreeDCamera()
self.camera = cameras.FlyCamera()
self.camera = '3D' # Select
# init the background color of this axes
self.bgcolor = 1,1,1 # remember that bgcolor is a property
self.bgcolors = None
# bind to event (no need to unbind because it's our own)
self.eventMouseDown.Bind(self._OnMouseDown)
self.eventKeyDown.Bind(self._OnKeyDown)
self.eventScroll.Bind(self._OnScroll)
# Store axis class and instantiate it
if axisClass is None or not isinstance(axisClass, BaseAxis):
axisClass = CartesianAxis
self._axisClass = axisClass
axisClass(self) # is a wobject
# Let there be lights
self._lights = []
for i in range(8):
self._lights.append(Light(self, i))
# Init default light
self.light0.On()
# make current
figure.currentAxes = self
## Define more methods
@DrawAfter
def SetLimits(self, rangeX=None, rangeY=None, rangeZ=None, margin=0.02):
""" SetLimits(rangeX=None, rangeY=None, rangeZ=None, margin=0.02)
Set the limits of the scene. For the 2D camera, these are taken
        as hints to set the camera view. For the 3D camera, they determine
where the axis is drawn.
Returns a 3-element tuple of visvis.Range objects.
Parameters
----------
rangeX : (min, max), optional
The range for the x dimension.
rangeY : (min, max), optional
The range for the y dimension.
rangeZ : (min, max), optional
The range for the z dimension.
margin : scalar
Represents the fraction of the range to add for the
ranges that are automatically obtained (default 2%).
Notes
-----
Each range can be None, a 2 element iterable, or a visvis.Range
object. If a range is None, the range is automatically obtained
from the wobjects currently in the scene. To set the range that
will fit all wobjects, simply use "SetLimits()"
"""
# Check margin
if margin and not isinstance(margin, float):
raise ValueError('In SetLimits(): margin should be a float.')
# if tuples, convert to ranges
if rangeX is None or isinstance(rangeX, Range):
pass # ok
elif hasattr(rangeX,'__len__') and len(rangeX)==2:
rangeX = Range(rangeX[0], rangeX[1])
else:
raise ValueError("Limits should be Ranges or two-element iterables.")
if rangeY is None or isinstance(rangeY, Range):
pass # ok
elif hasattr(rangeY,'__len__') and len(rangeY)==2:
rangeY = Range(rangeY[0], rangeY[1])
else:
raise ValueError("Limits should be Ranges or two-element iterables.")
if rangeZ is None or isinstance(rangeZ, Range):
pass # ok
elif hasattr(rangeZ,'__len__') and len(rangeZ)==2:
rangeZ = Range(rangeZ[0], rangeZ[1])
else:
raise ValueError("Limits should be Ranges or two-element iterables.")
rX, rY, rZ = rangeX, rangeY, rangeZ
if None in [rX, rY, rZ]:
# find outmost range
wobjects = self.FindObjects(base.Wobject)
for ob in wobjects:
# Ask object what it's limits are
tmp = ob._GetLimits()
if not tmp:
continue
tmpX, tmpY, tmpZ = tmp
# Check for NaNs
if tmpX.min*0 != 0 or tmpX.max*0 != 0:
tmpX = None
if tmpY.min*0 != 0 or tmpY.max*0 != 0:
tmpY = None
if tmpZ.min*0 != 0 or tmpZ.max*0 != 0:
tmpZ = None
# update min/max
if rangeX:
pass
elif tmpX and rX:
rX = Range( min(rX.min, tmpX.min), max(rX.max, tmpX.max) )
elif tmpX:
rX = tmpX
if rangeY:
pass
elif tmpY and rY:
rY = Range( min(rY.min, tmpY.min), max(rY.max, tmpY.max) )
elif tmpY:
rY = tmpY
if rangeZ:
pass
elif tmpZ and rZ:
rZ = Range( min(rZ.min, tmpZ.min), max(rZ.max, tmpZ.max) )
                elif tmpZ:
rZ = tmpZ
# default values
if rX is None:
rX = Range(-1,1)
if rY is None:
rY = Range(0,1)
if rZ is None:
rZ = Range(0,1)
# apply margins
if margin:
if rangeX is None:
tmp = rX.range * margin
if tmp == 0: tmp = margin
rX = Range( rX.min-tmp, rX.max+tmp )
if rangeY is None:
tmp = rY.range * margin
if tmp == 0: tmp = margin
rY = Range( rY.min-tmp, rY.max+tmp )
if rangeZ is None:
tmp = rZ.range * margin
if tmp == 0: tmp = margin
rZ = Range( rZ.min-tmp, rZ.max+tmp )
# apply to each camera
for cam in self._cameras.values():
cam.SetLimits(rX, rY, rZ)
# return
return rX, rY, rZ
def GetLimits(self):
""" GetLimits()
Get the limits of the axes as currently displayed. This can differ
from what was set by SetLimits if the daspectAuto is False. With
a 2D camera, this returns the limits for x and y determined by the
view. With a 3D camera, this returns the x, y, and z extents of
the coordinate axes.
"""
return self.camera.GetLimits()
def GetView(self):
""" GetView()
Get a dictionary with the camera parameters. The parameters are
named so they can be changed in a natural way and fed back using
SetView(). Note that the parameters can differ for different camera
types.
"""
return self.camera.GetViewParams()
@DrawAfter
def SetView(self, s=None, **kw):
""" SetView(s=None, **kw)
Set the camera view using the given dictionary with camera parameters.
Camera parameters can also be passed as keyword/value pairs; these will
supersede the values of the same key in s. If neither s nor any keywords
are set, the camera is reset to its initial state.
"""
if s or kw:
self.camera.SetViewParams(s, **kw)
else:
self.camera.Reset()
def Draw(self, fast=False):
""" Draw(fast=False)
Calls Draw(fast) on its figure, as the total opengl canvas
has to be redrawn. This might change in the future though.
"""
if self._isbeingdrawn:
return False
else:
# Make dirty
self._isdirty = True
# Draw figure
figure = self.GetFigure()
if figure:
figure.Draw(fast)
# Done
return True
@DrawAfter
def Clear(self, clearForDestruction=False):
""" Clear()
Clear the axes. Removing all wobjects in the scene.
"""
# Remove wobjects
for w in self.wobjects:
if isinstance(w, BaseAxis) and not clearForDestruction:
continue
elif hasattr(w,'Destroy'):
w.Destroy()
@property
def wobjects(self):
""" Get a shallow copy of the list of wobjects in the scene.
"""
return [child for child in self._wobjects]
def _CorrectPositionForLabels(self):
""" _CorrectPositionForLabels()
Correct the position for the labels and title etc.
"""
# init correction
xCorr, yCorr = 0, 0
# correction should be applied for 2D camera and a valid label
if isinstance(self.camera, cameras.TwoDCamera):
axis = self.axis
if isinstance(axis, PolarAxis2D):
if axis.visible and axis.xLabel:
yCorr += 25
else:
if axis.visible:
yCorr += 20
xCorr += 60 # there's already a margin of 10 by default
if axis.xLabel:
yCorr += 20
if axis.yLabel:
xCorr += 20
# check the difference
if xCorr != self._xCorr or yCorr != self._yCorr:
dx = self._xCorr - xCorr
dy = self._yCorr - yCorr
self._xCorr, self._yCorr = xCorr, yCorr
# apply
self.position.Correct(-dx, 0, dx, dy)
## Define more properties
@PropWithDraw
def bgcolors():
""" Get/Set the colors for the axes background gradient. If used, this
value overrides the normal bgcolor property. Notes:
* Set to None to disable the gradient
* Setting two colors defines a gradient from top to bottom.
* Setting four colors sets the colors at the four corners.
* The value must be an iterable (2 or 4 elements) in which each
element can be converted to a color.
"""
def fget(self):
return self._bgcolors
def fset(self, value):
# None?
if value is None:
self._bgcolors = None
return
# Check
try:
if len(value) not in [2,4]:
raise ValueError('bgcolors must have 2 or 4 elements.')
except Exception:
# not an iterable
raise ValueError('bgcolors must be None, or tuple/list/string.')
# Apply
colors = [getColor(val, 'setting bgcolors') for val in value]
self._bgcolors = tuple(colors)
return locals()
@property
def axis(self):
""" Get the axis object associated with this axes.
A new instance is created if it does not yet exist. This object
can be used to change the appearance of the axis (tickmarks, labels,
grid, etc.).
See also the [[cls_BaseAxis Axis class]].
"""
# Find object in root
for object in self._wobjects:
if isinstance(object, BaseAxis):
return object
else:
# Create new and return
return self._axisClass(self)
@PropWithDraw
def axisType():
""" Get/Set the axis type to use.
Currently supported are:
* 'cartesian' - a normal axis (default)
* 'polar' - a polar axis.
"""
def fget(self):
D = {PolarAxis2D:'polar', CartesianAxis:'cartesian'}
if self._axisClass in D:
return D[self._axisClass]
else:
return ''
def fset(self, axisClass):
# Handle string argument
if not isinstance(axisClass, BaseAxis):
D = {'polar':PolarAxis2D, 'cartesian':CartesianAxis}
if axisClass not in D:
raise ValueError('Invalid axis class.')
axisClass = D[axisClass.lower()]
if axisClass is not self._axisClass:
# Store class
self._axisClass = axisClass
# Remove previous
axisList = self.FindObjects(BaseAxis)
for axis in axisList:
axis.Destroy()
# Add new
axisClass(self)
return locals()
@PropWithDraw
def camera():
""" Get/Set the current camera.
Setting can be done using:
* The index of the camera; 1,2,3 for fly, 2d and 3d respectively.
* A value as in the 'cameraType' property.
* A new camera instance. This will replace any existing camera
of the same type. To have multiple 3D cameras at the same axes,
one needs to subclass cameras.ThreeDCamera.
Shared cameras
--------------
One can set the camera to the camera of another Axes, so that they
share the same camera. A camera that is shared uses daspectAuto
property of the first axes it was attached to.
Interactively changing a camera
-------------------------------
By default, the camera can be changed using the keyboard using the
shortcut ALT+i, where i is the camera number. Similarly
        the daspectAuto property can be switched with ALT+d.
"""
def fget(self):
return self._camera
def fset(self, value):
if isinstance(value, (basestring, int)):
# Type
self.cameraType = value
else:
# It must be a camera
camera = value
# Check
if not isinstance(camera, cameras.BaseCamera):
raise ValueError('Given argument is not a camera.')
# Store camera
camType = camera.__class__.__name__
oldCamera = self._cameras.get(camType, None)
self._cameras[camType] = camera
# Register at camera, unregister at old one
camera._RegisterAxes(self)
if oldCamera and oldCamera is not camera:
oldCamera._UnregisterAxes(self)
# Make current and set limits
self._camera = camera
self.SetLimits()
return locals()
@PropWithDraw
def cameraType():
""" Get/Set the camera type to use.
Currently supported are:
* '2d' or 2 - two dimensional camera that looks down the z-dimension.
* '3d' or 3 - three dimensional camera.
* 'fly' or 1 - a camera like a flight sim.
"""
def fget(self):
return self._camera._NAMES[0]
def fset(self, name):
# Case insensitive
if isinstance(name, basestring):
name = name.lower()
# Get camera with that name
theCamera = None
for camera in self._cameras.values():
if name in camera._NAMES:
theCamera = camera
break
# Set or raise error
if theCamera:
self._camera = theCamera
else:
raise ValueError("Unknown camera type!")
return locals()
@property
def mousepos(self):
""" Get position of mouse in screen pixels, relative to this axes.
"""
figure = self.GetFigure()
if not figure:
return 0,0
x,y = figure.mousepos
pos = self.position
return x-pos.absLeft, y-pos.absTop
@PropWithDraw
def daspect():
""" Get/set the data aspect ratio of the current camera. Setting will
also update daspect for the other cameras.
The daspect is a 3-element tuple (x,y,z). If a 2-element tuple is
given, z is assumed 1. Note that only the ratio between the values
matters (i.e. (1,1,1) equals (2,2,2)). When a value is negative, the
corresponding dimension is flipped.
Note that if daspectAuto is True, the camera automatically changes
its daspect to nicely scale the data to fit the screen (but the sign
is preserved).
"""
def fget(self):
return self.camera.daspect
def fset(self, value):
# Set on all cameras
camera = None
for camera in self._cameras.values():
camera.daspect = value
# Set on self so new cameras can see what the user set.
# Use camera's daspect, in case a 2-element tuple was used.
if camera is not None:
self._daspect = camera.daspect
return locals()
@property
def daspectNormalized(self):
""" Get the data aspect ratio, normalized such that the x scaling
is +/- 1.
"""
return self.camera.daspectNormalized
@PropWithDraw
def daspectAuto():
""" Get/Set whether to scale the dimensions independently.
If True, the camera changes the value of its daspect to nicely fit
the data on screen (but the sign is preserved). This can happen
(depending on the type of camera) during resetting, zooming, and
resizing of the axes.
If set to False, the daspect of all cameras is reverted to
the user-set daspect.
"""
def fget(self):
return self._daspectAuto
def fset(self, value):
            # Set daspectAuto
self._daspectAuto = bool(value)
# Update daspect if False
if not value:
self.daspect = self._daspect
return locals()
@PropWithDraw
def legend():
""" Get/Set the string labels for the legend. Upon setting,
a legend wibject is automatically shown.
"""
def fget(self):
return self.legendWibject._stringList
def fset(self, value):
self.legendWibject.SetStrings(value)
return locals()
@property
def legendWibject(self):
""" Get the legend wibject, so for exampe its position
can be changed programatically.
"""
legendWibjects = self.FindObjects(Legend)
if not legendWibjects:
legendWibjects = [Legend(self)] # create legend object
return legendWibjects[-1]
@property
def light0(self):
""" Get the default light source in the scene.
"""
return self._lights[0]
@property
def lights(self):
""" Get a list of all available lights in the scene. Only light0 is
        enabled by default.
"""
return [light for light in self._lights]
@PropWithDraw
def useBuffer():
""" Get/Set whether to use a buffer; after drawing, a screenshot
of the result is obtained and stored. When the axes needs to
be redrawn, but has not changed, the buffer can be used to
draw the contents at great speed (default True).
"""
def fget(self):
return self._useBuffer
def fset(self, value):
self._useBuffer = bool(value)
return locals()
@Property
def motionBlur():
""" Get/Set the amount of motion blur when interacting with
this axes. The value should be a number between 0 and 1.
Note: this is a rather useless feature :)
"""
def fget(self):
return self._motionBlur
def fset(self, value):
tmp = float(value)
self._motionBlur = min(max(tmp,0.0),1.0)
return locals()
## Implement methods
def OnDestroy(self):
# Clean up.
base.Wibject.OnDestroy(self)
self.Clear(True)
self._camera = None
self._cameras = {}
# container is destroyed as soon as it notices the axes is gone
        # any wibjects are destroyed automatically by the Destroy command.
def OnDrawShape(self, clr):
# Correct size for labels (shape is the first draw pass)
self._CorrectPositionForLabels()
# Get picker helper and draw
pickerHelper = self.GetFigure()._pickerHelper
# Size of figure ...
fig = self.GetFigure()
w,h = fig.position.size
# Find actual position in pixels, do not allow negative values
pos = self.position.InPixels()
pos._w, pos._h = max(pos.w, 1), max(pos.h, 1)
pos.h_fig = h
pos._Update()
# Set viewport (note that OpenGL has origin in lower-left, visvis
# in upper-left)
gl.glViewport(pos.absLeft, h-pos.absBottom, pos.w, pos.h)
self._OnDrawContent(DRAW_SHAPE, clr, pos, pickerHelper)
# Prepare for wibject children (draw in full viewport)
gl.glViewport(0,0,w,h)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ortho( 0, w, h, 0)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Transform
self.parent._Transform() # Container
self._Transform() # Self
def OnDrawFast(self):
self._OnDrawInMode(DRAW_FAST, self.bgcolor)
def OnDraw(self):
self._OnDrawInMode(DRAW_NORMAL, self.bgcolor)
def _OnDrawInMode(self, mode, bgcolor, pickerHelper=None):
# Draw the background of the axes and the wobjects in it.
# Prepare
if True:
# Get size of figure ...
fig = self.GetFigure()
w,h = fig.position.size
# Correction of size for labels is normally done in OnDrawShape,
# but this is not called if user interaction is disabled ...
if not fig.enableUserInteraction:
self._CorrectPositionForLabels()
# Find actual position in pixels, do not allow negative values
pos = self.position.InPixels()
pos._w, pos._h = max(pos.w, 1), max(pos.h, 1)
pos.h_fig = h
pos._Update()
# Set viewport (note that OpenGL has origin in lower-left, visvis
# in upper-left)
gl.glViewport(pos.absLeft, h-pos.absBottom, pos.w, pos.h)
# Select screenshot
sshot = self._screenshot
# Perform tests
# Only if enabled on axes and if user interaction is enabled for the figure
if self._useBuffer and fig.enableUserInteraction:
# Test if we can use the screenshot
canUseScreenshot = ( (sshot is not None) and
sshot.shape[0] == pos.h and
sshot.shape[1] == pos.w )
# Test if we want to blur with the screenshot
blurWithScreenshot = ( bool(self._motionBlur) and
self._isdirty and
mode==DRAW_FAST )
# Test whether we should use the screenshot
shouldUseScreenshot = ( canUseScreenshot and
(not self._isdirty or blurWithScreenshot) )
else:
# Old school mode
shouldUseScreenshot = False
blurWithScreenshot = False
# Draw content of axes (if we need to)
if (not shouldUseScreenshot) or blurWithScreenshot:
# Draw fresh
self._OnDrawContent(mode, bgcolor, pos, pickerHelper)
# Make screenshot and store/combine
if self._useBuffer and fig.enableUserInteraction:
tmp = _Screenshot()
shapesMatch = (sshot is not None) and tmp.shape == sshot.shape
if blurWithScreenshot and shapesMatch:
f = self._motionBlur
sshot[:] = f*sshot + (1.0-f)*tmp
else:
self._screenshot = tmp
# Draw screenshot (if we should)
if shouldUseScreenshot:
# Set view
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ortho( 0, 1, 0, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Apply bitmap directly
sshot = self._screenshot
gl.glRasterPos(0,0)
gl.glDrawPixels(pos.w, pos.h, gl.GL_RGB, gl.GL_FLOAT, sshot)
        # Set viewport to the full figure and disable depth test
if True:
gl.glViewport(0,0,w,h)
gl.glDisable(gl.GL_DEPTH_TEST)
# Draw axis if using the 2D camera
if isinstance(self.camera, cameras.TwoDCamera):
# Let axis object for 2D-camera draw in screen coordinates
# in the full viewport.
# Note that if the buffered screenshot is used and the content
# is not drawn, the axis' OnDraw method is not called, and the
# ticks are therefore not re-calculated (which is time-consuming).
# Set view
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ortho( 0, w, 0, h) # Note that 0 and h are swapped
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Draw
for item in self._wobjects:
if isinstance(item, BaseAxis):
item._DrawTree(DRAW_SCREEN)
# Prepare for drawing child wibjects in screen coordinates
if True:
# Set view
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ortho( 0, w, h, 0)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Transform
self.parent._Transform() # Container
self._Transform() # Self
# We're clean now ...
if mode != DRAW_SHAPE:
self._isdirty = False
def _OnDrawContent(self, mode, bgcolor, pos, pickerHelper=None):
# Draw background
if bgcolor:
# Set view
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ortho( 0, 1, 0, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Overwrite all
gl.glDisable(gl.GL_DEPTH_TEST)
# Define colors, use gradient?
bgcolor1 = bgcolor2 = bgcolor3 = bgcolor4 = bgcolor
if mode != DRAW_SHAPE and self.bgcolors:
gl.glShadeModel(gl.GL_SMOOTH)
if len(self.bgcolors) == 2:
bgcolor1 = bgcolor2 = self.bgcolors[0]
bgcolor3 = bgcolor4 = self.bgcolors[1]
elif len(self.bgcolors) == 4:
bgcolor1, bgcolor2, bgcolor3, bgcolor4 = self.bgcolors
# Draw
gl.glBegin(gl.GL_POLYGON)
gl.glColor3f(bgcolor3[0], bgcolor3[1], bgcolor3[2])
gl.glVertex2f(0,0)
gl.glColor3f(bgcolor1[0], bgcolor1[1], bgcolor1[2])
gl.glVertex2f(0,1)
gl.glColor3f(bgcolor2[0], bgcolor2[1], bgcolor2[2])
gl.glVertex2f(1,1)
gl.glColor3f(bgcolor4[0], bgcolor4[1], bgcolor4[2])
gl.glVertex2f(1,0)
gl.glEnd()
# Reset
gl.glEnable(gl.GL_DEPTH_TEST)
# Draw items in world coordinates
if True:
# Setup the camera
self.camera.SetView()
# Draw stuff, but wait with lines
lines2draw = []
for item in self._wobjects:
if isinstance(item, (Line, BaseAxis)):
lines2draw.append(item)
else:
item._DrawTree(mode, pickerHelper)
# Lines (and the axis) are a special case. In order to blend
            # them well, we should draw textures, meshes etc. first.
            # Note that this does not work if lines or textures are children
            # of each other; in that case they should be added to the scene
# in the correct order.
for item in lines2draw:
item._DrawTree(mode, pickerHelper)
# Draw items in screen coordinates
if mode != DRAW_SHAPE:
# Set camera to screen coordinates.
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
h = pos.h_fig
ortho( pos.absLeft, pos.absRight, h-pos.absBottom, h-pos.absTop)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# Allow wobjects to draw in screen coordinates
# Note that the axis for the 2d camera needs to draw beyond
# the viewport of the axes, and is therefore drawn later.
gl.glEnable(gl.GL_DEPTH_TEST)
is2dcam = isinstance(self.camera, cameras.TwoDCamera)
for item in self._wobjects:
if is2dcam and isinstance(item, BaseAxis):
continue
item._DrawTree(DRAW_SCREEN)
def _OnMouseDown(self, event):
self.MakeCurrent()
def _OnScroll(self, event):
SCROLL_ZOOM_FACTOR = 1.1
self.camera.zoom *= SCROLL_ZOOM_FACTOR**event.verticalSteps
def _OnKeyDown(self, event):
""" Give user a lot of control via special keyboard input.
        Kind of a secret function, as not all keys are documented.
"""
# Only do this if this is the current axes
f = self.GetFigure()
if not (f and self is f.currentAxes):
return False
if vv.KEY_ALT in event.modifiers and len(event.modifiers)==1:
numbers = [ord(i) for i in '0123456789']
if event.key in numbers:
self.cameraType = int(chr(event.key))
elif event.key == ord('d'):
self.daspectAuto = not self.daspectAuto
elif event.key == ord('a'):
self.axis.visible = not self.axis.visible
elif event.key == ord('g'):
self.axis.showGrid = not any(self.axis.showGrid)
elif event.key == ord('b'):
if self.bgcolor == (1,1,1):
self.bgcolor = 'k'
self.axis.axisColor = 'w'
else:
self.bgcolor = 'w'
self.axis.axisColor = 'k'
else:
return False
else:
return False
def MakeCurrent(self):
""" MakeCurrent()
Make this the current axes. Also makes the containing figure
the current figure.
"""
f = self.GetFigure()
if f:
f.currentAxes = self
f.MakeCurrent()
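# The Axes defined above is normally obtained through vv.subplot() or vv.gca()
# rather than instantiated directly. The function below is a minimal usage
# sketch, not part of this module: it assumes visvis is importable as "vv"
# with a working GUI backend, and it is never called from here.
def _example_axes_usage():
    import visvis as vv
    a = vv.gca()                       # get (or create) the current Axes
    a.cameraType = '2d'                # same effect as pressing ALT+2
    a.daspectAuto = False              # keep all dimensions equally zoomed
    a.bgcolors = (0.9, 0.9, 1.0), (1.0, 1.0, 1.0)  # top-to-bottom gradient
    a.axis.showGrid = True             # appearance of the axis object
    a.SetLimits(rangeX=(0, 10), rangeY=(-1, 1))    # z range taken from the data
    return a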
class Legend(DraggableBox):
""" Legend(parent)
A legend is a wibject that should be a child (does not have
to be the direct child) of an axes. It displays a description for
each line in the axes, and is draggable.
A Legend can be shown with the function vv.legend(), or using the
Axes.legend property.
"""
def __init__(self, parent):
DraggableBox.__init__(self, parent)
# params for the layout
self._linelen = 40
self._xoffset = 10
self._yoffset = 3
self._yspacing = 16
# position in upper left by default
self.position = 10, 10
self.bgcolor = 'w'
# start with nothing
self._stringList = []
self.visible = False
# by creating a _wobjects attribute, we are allowed to hold
        # wobjects, but are ourselves responsible for drawing them
self._wobjects = []
def _AddLineAndLabel(self, text, yspacing=1.0, twoPoints=True):
""" Add a line and label to our pool. """
# get y position
index = len(self._wobjects)
y = self._yoffset + yspacing * (index)
# create label
label = Label(self, text)
label.bgcolor = ''
label.position = self._xoffset*2 + twoPoints*self._linelen, y
deltax, deltay = label.GetVertexLimits()
#y2 = label.position.h / 2
y2 = (deltay[1] + deltay[0]) / 2
# create 2-element pointset
pp = Pointset(2)
pp.append(self._xoffset, y + y2)
if twoPoints:
pp.append(self._xoffset + self._linelen, y + y2)
# create line
line = Line(self, pp) # line has no parent
# return
return line, label
def SetStrings(self, *stringList):
""" SetStrings(*stringList)
Set the strings of the legend labels.
"""
# Note that setting the .visible property will invoke a draw
        # Test and unpack the given string list
if len(stringList)==1 and isinstance(stringList[0],(tuple,list)):
stringList = stringList[0]
for value in stringList:
if not isinstance(value, basestring):
raise ValueError("Legend string list should only contain strings.")
# store
self._stringList = stringList
# clean up labels and lines
for line in [line for line in self._wobjects]:
line.Destroy()
for label in self.children:
label.Destroy()
# find axes and figure
axes = self.parent
while axes and not isinstance(axes, Axes):
axes = axes.parent
if not axes:
return
fig = axes.GetFigure()
# collect line objects
lines = []
twoPoints = False
for ob in axes._wobjects:
if len(self._wobjects) >= len(stringList):
break
if isinstance(ob, Line):
# Add line props
tmp = ob.ls, ob.lc, ob.lw, ob.ms, ob.mc, ob.mw, ob.mec, ob.mew
lines.append(tmp)
# Set whether to use two points
twoPoints = twoPoints or bool(ob.ls and ob.lc and ob.lw)
# create new lines and labels
maxWidth = 0
nr = -1
for lineProps in lines:
nr += 1
if nr >= len(stringList):
break
# get new line and label
text = stringList[nr]
yspacing = self._yspacing * fig._relativeFontSize
line, label = self._AddLineAndLabel(text, yspacing, twoPoints)
# apply line properties
line.ls, line.lc, line.lw = lineProps[0:3]
line.ms, line.mc, line.mw = lineProps[3:6]
line.mec, line.mew = lineProps[6:8]
# correct label size and store max
deltax, deltay = label.GetVertexLimits()
label.position.w = (deltax[1]-deltax[0])+2
maxWidth = max([maxWidth, label.position.w ])
# make own size ok
if self._wobjects:
pos = label.position
self.position.w = maxWidth + pos.x + self._xoffset
#self.position.h = pos.bottom + self._yoffset
deltax, deltay = label.GetVertexLimits()
labelHeight = deltay[1]# - deltay[0]
self.position.h = pos.top + labelHeight + self._yoffset + 2
self.visible = True
else:
self.visible = False
def OnDraw(self):
# draw box
DraggableBox.OnDraw(self)
# draw lines
for line in self._wobjects:
line.OnDraw()
        # reset GL state that was set because the lines assumed they were
        # drawing in world coordinates.
gl.glDisable(gl.GL_DEPTH_TEST)
def OnDestroy(self):
DraggableBox.OnDestroy(self)
# clear lines and such
for ob in [ob for ob in self._wobjects]:
ob.Destroy()
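# A hedged sketch of how the Legend above is usually populated, via the
# Axes.legend property rather than by creating a Legend directly. It is not
# part of this module and assumes visvis ("vv") and numpy are available; it
# is never called from here.
def _example_legend_usage():
    import numpy as np
    import visvis as vv
    x = np.linspace(0, 6.28, 100)
    vv.plot(x, np.sin(x), lc='r')
    vv.plot(x, np.cos(x), lc='b')
    a = vv.gca()
    a.legend = 'sine', 'cosine'        # creates/updates the Legend wibject
    return a.legendWibject             # e.g. to reposition it programmatically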
"""QuizReports API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class QuizReportsAPI(BaseCanvasAPI):
"""QuizReports API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for QuizReportsAPI."""
super(QuizReportsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.QuizReportsAPI")
def retrieve_all_quiz_reports(self, course_id, quiz_id, includes_all_versions=None):
"""
Retrieve all quiz reports.
Returns a list of all available reports.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""
ID
"""
path["quiz_id"] = quiz_id
# OPTIONAL - includes_all_versions
"""
Whether to retrieve reports that consider all the submissions or only
the most recent. Defaults to false, ignored for item_analysis reports.
"""
if includes_all_versions is not None:
params["includes_all_versions"] = includes_all_versions
self.logger.debug(
"GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path),
data=data,
params=params,
all_pages=True,
)
def create_quiz_report(
self,
course_id,
quiz_id,
quiz_report_report_type,
include=None,
quiz_report_includes_all_versions=None,
):
"""
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""
ID
"""
path["quiz_id"] = quiz_id
# REQUIRED - quiz_report[report_type]
"""
The type of report to be generated.
"""
self._validate_enum(
quiz_report_report_type, ["student_analysis", "item_analysis"]
)
data["quiz_report[report_type]"] = quiz_report_report_type
# OPTIONAL - quiz_report[includes_all_versions]
"""
Whether the report should consider all submissions or only the most
recent. Defaults to false, ignored for item_analysis.
"""
if quiz_report_includes_all_versions is not None:
data[
"quiz_report[includes_all_versions]"
] = quiz_report_includes_all_versions
# OPTIONAL - include
"""
Whether the output should include documents for the file and/or progress
objects associated with this report. (Note: JSON-API only)
"""
if include is not None:
self._validate_enum(include, ["file", "progress"])
data["include"] = include
self.logger.debug(
"POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path),
data=data,
params=params,
single_item=True,
)
def get_quiz_report(self, course_id, id, quiz_id, include=None):
"""
Get a quiz report.
Returns the data for a single quiz report.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""
ID
"""
path["quiz_id"] = quiz_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - include
"""
Whether the output should include documents for the file and/or progress
objects associated with this report. (Note: JSON-API only)
"""
if include is not None:
self._validate_enum(include, ["file", "progress"])
params["include"] = include
self.logger.debug(
"GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def abort_generation_of_report_or_remove_previously_generated_one(
self, course_id, id, quiz_id
):
"""
Abort the generation of a report, or remove a previously generated one.
This API allows you to cancel a previous request you issued for a report to
be generated. Or in the case of an already generated report, you'd like to
remove it, perhaps to generate it another time with an updated version that
provides new features.
You must check the report's generation status before attempting to use this
interface. See the "workflow_state" property of the QuizReport's Progress
object for more information. Only when the progress reports itself in a
"queued" state can the generation be aborted.
*Responses*
- <code>204 No Content</code> if your request was accepted
- <code>422 Unprocessable Entity</code> if the report is not being generated
or can not be aborted at this stage
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""
ID
"""
path["quiz_id"] = quiz_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"DELETE /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
class Quizreport(BaseModel):
"""Quizreport Model."""
def __init__(
self,
id=None,
quiz_id=None,
report_type=None,
readable_type=None,
includes_all_versions=None,
anonymous=None,
generatable=None,
created_at=None,
updated_at=None,
url=None,
file=None,
progress_url=None,
progress=None,
):
"""Init method for Quizreport class."""
self._id = id
self._quiz_id = quiz_id
self._report_type = report_type
self._readable_type = readable_type
self._includes_all_versions = includes_all_versions
self._anonymous = anonymous
self._generatable = generatable
self._created_at = created_at
self._updated_at = updated_at
self._url = url
self._file = file
self._progress_url = progress_url
self._progress = progress
self.logger = logging.getLogger("py3canvas.Quizreport")
@property
def id(self):
"""the ID of the quiz report."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def quiz_id(self):
"""the ID of the quiz."""
return self._quiz_id
@quiz_id.setter
def quiz_id(self, value):
"""Setter for quiz_id property."""
self.logger.warn(
"Setting values on quiz_id will NOT update the remote Canvas instance."
)
self._quiz_id = value
@property
def report_type(self):
"""which type of report this is possible values: 'student_analysis', 'item_analysis'."""
return self._report_type
@report_type.setter
def report_type(self, value):
"""Setter for report_type property."""
self.logger.warn(
"Setting values on report_type will NOT update the remote Canvas instance."
)
self._report_type = value
@property
def readable_type(self):
"""a human-readable (and localized) version of the report_type."""
return self._readable_type
@readable_type.setter
def readable_type(self, value):
"""Setter for readable_type property."""
self.logger.warn(
"Setting values on readable_type will NOT update the remote Canvas instance."
)
self._readable_type = value
@property
def includes_all_versions(self):
"""boolean indicating whether the report represents all submissions or only the most recent ones for each student."""
return self._includes_all_versions
@includes_all_versions.setter
def includes_all_versions(self, value):
"""Setter for includes_all_versions property."""
self.logger.warn(
"Setting values on includes_all_versions will NOT update the remote Canvas instance."
)
self._includes_all_versions = value
@property
def anonymous(self):
"""boolean indicating whether the report is for an anonymous survey. if true, no student names will be included in the csv."""
return self._anonymous
@anonymous.setter
def anonymous(self, value):
"""Setter for anonymous property."""
self.logger.warn(
"Setting values on anonymous will NOT update the remote Canvas instance."
)
self._anonymous = value
@property
def generatable(self):
"""boolean indicating whether the report can be generated, which is true unless the quiz is a survey one."""
return self._generatable
@generatable.setter
def generatable(self, value):
"""Setter for generatable property."""
self.logger.warn(
"Setting values on generatable will NOT update the remote Canvas instance."
)
self._generatable = value
@property
def created_at(self):
"""when the report was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn(
"Setting values on created_at will NOT update the remote Canvas instance."
)
self._created_at = value
@property
def updated_at(self):
"""when the report was last updated."""
return self._updated_at
@updated_at.setter
def updated_at(self, value):
"""Setter for updated_at property."""
self.logger.warn(
"Setting values on updated_at will NOT update the remote Canvas instance."
)
self._updated_at = value
@property
def url(self):
"""the API endpoint for this report."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn(
"Setting values on url will NOT update the remote Canvas instance."
)
self._url = value
@property
def file(self):
"""if the report has finished generating, a File object that represents it. refer to the Files API for more information about the format."""
return self._file
@file.setter
def file(self, value):
"""Setter for file property."""
self.logger.warn(
"Setting values on file will NOT update the remote Canvas instance."
)
self._file = value
@property
def progress_url(self):
"""if the report has not yet finished generating, a URL where information about its progress can be retrieved. refer to the Progress API for more information (Note: not available in JSON-API format)."""
return self._progress_url
@progress_url.setter
def progress_url(self, value):
"""Setter for progress_url property."""
self.logger.warn(
"Setting values on progress_url will NOT update the remote Canvas instance."
)
self._progress_url = value
@property
def progress(self):
"""if the report is being generated, a Progress object that represents the operation. Refer to the Progress API for more information about the format. (Note: available only in JSON-API format)."""
return self._progress
@progress.setter
def progress(self, value):
"""Setter for progress property."""
self.logger.warn(
"Setting values on progress will NOT update the remote Canvas instance."
)
self._progress = value
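# A minimal, hedged usage sketch for the client above; it is not part of the
# generated module and is never called. The base URL, token and the exact
# BaseCanvasAPI constructor arguments are assumptions for illustration, as is
# treating the returned report as a plain dict.
def _example_quiz_reports_usage():
    api = QuizReportsAPI("https://canvas.example.edu/api", "FAKE_ACCESS_TOKEN")
    # Kick off a student_analysis report, then fetch it again by id.
    report = api.create_quiz_report(
        course_id=1234,
        quiz_id=5678,
        quiz_report_report_type="student_analysis",
        quiz_report_includes_all_versions=True,
    )
    return api.get_quiz_report(course_id=1234, id=report["id"], quiz_id=5678)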
#!/usr/bin/python
#
# offcputime Summarize off-CPU time by stack trace
# For Linux, uses BCC, eBPF.
#
# USAGE: offcputime [-h] [-p PID | -u | -k] [-U | -K] [-f] [duration]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from sys import stderr
from time import sleep, strftime
import argparse
import errno
import signal
# arg validation
def positive_int(val):
try:
ival = int(val)
except ValueError:
raise argparse.ArgumentTypeError("must be an integer")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
def positive_nonzero_int(val):
ival = positive_int(val)
if ival == 0:
raise argparse.ArgumentTypeError("must be nonzero")
return ival
def stack_id_err(stack_id):
# -EFAULT in get_stackid normally means the stack-trace is not available,
    # such as when requesting a kernel stack trace from user-space code
return (stack_id < 0) and (stack_id != -errno.EFAULT)
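# Hedged sketch (not part of the original tool) of what stack_id_err() counts
# as an error: get_stackid() returns a negative errno on failure, and -EFAULT
# merely means "no stack of that kind exists", so it is not treated as a loss.
def _example_stack_id_err():
    assert not stack_id_err(3)               # a valid, non-negative stack id
    assert not stack_id_err(-errno.EFAULT)   # expected when a stack is absent
    assert stack_id_err(-errno.ENOMEM)       # a real loss (stack map is full)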
# arguments
examples = """examples:
./offcputime # trace off-CPU stack time until Ctrl-C
./offcputime 5 # trace for 5 seconds only
./offcputime -f 5 # 5 seconds, and output in folded format
./offcputime -m 1000 # trace only events that last more than 1000 usec
./offcputime -M 10000 # trace only events that last less than 10000 usec
./offcputime -p 185 # only trace threads for PID 185
./offcputime -t 188 # only trace thread 188
./offcputime -u # only trace user threads (no kernel)
./offcputime -k # only trace kernel threads (no user)
./offcputime -U # only show user space stacks (no kernel)
./offcputime -K # only show kernel space stacks (no user)
"""
parser = argparse.ArgumentParser(
description="Summarize off-CPU time by stack trace",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
# Note: this script provides --pid and --tid flags but their arguments are
# referred to internally using kernel nomenclature: TGID and PID.
thread_group.add_argument("-p", "--pid", metavar="PID", dest="tgid",
help="trace this PID only", type=positive_int)
thread_group.add_argument("-t", "--tid", metavar="TID", dest="pid",
help="trace this TID only", type=positive_int)
thread_group.add_argument("-u", "--user-threads-only", action="store_true",
help="user threads only (no kernel threads)")
thread_group.add_argument("-k", "--kernel-threads-only", action="store_true",
help="kernel threads only (no user threads)")
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-d", "--delimited", action="store_true",
help="insert delimiter between kernel/user stacks")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("--stack-storage-size", default=1024,
type=positive_nonzero_int,
help="the number of unique stack traces that can be stored and "
"displayed (default 1024)")
parser.add_argument("duration", nargs="?", default=99999999,
type=positive_nonzero_int,
help="duration of trace, in seconds")
parser.add_argument("-m", "--min-block-time", default=1,
type=positive_nonzero_int,
help="the amount of time in microseconds over which we " +
"store traces (default 1)")
parser.add_argument("-M", "--max-block-time", default=(1 << 64) - 1,
type=positive_nonzero_int,
help="the amount of time in microseconds under which we " +
"store traces (default U64_MAX)")
parser.add_argument("--state", type=positive_int,
help="filter on this thread state bitmask (eg, 2 == TASK_UNINTERRUPTIBLE" +
") see include/linux/sched.h")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
# signal handler
def signal_ignore(signal, frame):
print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MINBLOCK_US MINBLOCK_US_VALUEULL
#define MAXBLOCK_US MAXBLOCK_US_VALUEULL
struct key_t {
u32 pid;
u32 tgid;
int user_stack_id;
int kernel_stack_id;
char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
u32 pid = prev->pid;
u32 tgid = prev->tgid;
u64 ts, *tsp;
// record previous thread sleep time
if ((THREAD_FILTER) && (STATE_FILTER)) {
ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
// get the current thread's start time
pid = bpf_get_current_pid_tgid();
tgid = bpf_get_current_pid_tgid() >> 32;
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed start or filtered
}
// calculate current thread's delta time
u64 delta = bpf_ktime_get_ns() - *tsp;
start.delete(&pid);
delta = delta / 1000;
if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) {
return 0;
}
// create map key
struct key_t key = {};
key.pid = pid;
key.tgid = tgid;
key.user_stack_id = USER_STACK_GET;
key.kernel_stack_id = KERNEL_STACK_GET;
bpf_get_current_comm(&key.name, sizeof(key.name));
counts.increment(key, delta);
return 0;
}
"""
# set thread filter
thread_context = ""
if args.tgid is not None:
thread_context = "PID %d" % args.tgid
thread_filter = 'tgid == %d' % args.tgid
elif args.pid is not None:
thread_context = "TID %d" % args.pid
thread_filter = 'pid == %d' % args.pid
elif args.user_threads_only:
thread_context = "user threads"
thread_filter = '!(prev->flags & PF_KTHREAD)'
elif args.kernel_threads_only:
thread_context = "kernel threads"
thread_filter = 'prev->flags & PF_KTHREAD'
else:
thread_context = "all threads"
thread_filter = '1'
if args.state == 0:
state_filter = 'prev->state == 0'
elif args.state:
# these states are sometimes bitmask checked
state_filter = 'prev->state & %d' % args.state
else:
state_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
bpf_text = bpf_text.replace('STATE_FILTER', state_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
bpf_text = bpf_text.replace('MINBLOCK_US_VALUE', str(args.min_block_time))
bpf_text = bpf_text.replace('MAXBLOCK_US_VALUE', str(args.max_block_time))
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(ctx, 0)"
user_stack_get = "stack_traces.get_stackid(ctx, BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
stack_context = "user"
kernel_stack_get = "-1"
elif args.kernel_stacks_only:
stack_context = "kernel"
user_stack_get = "-1"
else:
stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
need_delimiter = args.delimited and not (args.kernel_stacks_only or
args.user_stacks_only)
# check for an edge case; the code below will handle this case correctly
# but ultimately nothing will be displayed
if args.kernel_threads_only and args.user_stacks_only:
print("ERROR: Displaying user stacks for kernel threads " +
"doesn't make sense.", file=stderr)
exit(1)
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
matched = b.num_open_kprobes()
if matched == 0:
print("error: 0 functions traced. Exiting.", file=stderr)
exit(1)
# header
if not folded:
print("Tracing off-CPU time (us) of %s by %s stack" %
(thread_context, stack_context), end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
# handle get_stackid errors
if not args.user_stacks_only and stack_id_err(k.kernel_stack_id):
missing_stacks += 1
has_enomem = has_enomem or k.kernel_stack_id == -errno.ENOMEM
if not args.kernel_stacks_only and stack_id_err(k.user_stack_id):
missing_stacks += 1
has_enomem = has_enomem or k.user_stack_id == -errno.ENOMEM
# user stacks will be symbolized by tgid, not pid, to avoid the overhead
# of one symbol resolver per thread
user_stack = [] if k.user_stack_id < 0 else \
stack_traces.walk(k.user_stack_id)
kernel_stack = [] if k.kernel_stack_id < 0 else \
stack_traces.walk(k.kernel_stack_id)
if folded:
# print folded stack output
user_stack = list(user_stack)
kernel_stack = list(kernel_stack)
line = [k.name.decode('utf-8', 'replace')]
        # if we failed to get the stack id, such as due to no space (-ENOMEM) or
# hash collision (-EEXIST), we still print a placeholder for consistency
if not args.kernel_stacks_only:
if stack_id_err(k.user_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.tgid).decode('utf-8', 'replace')
for addr in reversed(user_stack)])
if not args.user_stacks_only:
line.extend(["-"] if (need_delimiter and k.kernel_stack_id >= 0 and k.user_stack_id >= 0) else [])
if stack_id_err(k.kernel_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(kernel_stack)])
print("%s %d" % (";".join(line), v.value))
else:
# print default multi-line stack output
if not args.user_stacks_only:
if stack_id_err(k.kernel_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in kernel_stack:
print(" %s" % b.ksym(addr))
if not args.kernel_stacks_only:
if need_delimiter and k.user_stack_id >= 0 and k.kernel_stack_id >= 0:
print(" --")
if stack_id_err(k.user_stack_id):
print(" [Missed User Stack]")
else:
for addr in user_stack:
print(" %s" % b.sym(addr, k.tgid))
print(" %-16s %s (%d)" % ("-", k.name.decode('utf-8', 'replace'), k.pid))
print(" %d\n" % v.value)
if missing_stacks > 0:
enomem_str = "" if not has_enomem else \
" Consider increasing --stack-storage-size."
print("WARNING: %d stack traces lost and could not be displayed.%s" %
(missing_stacks, enomem_str),
file=stderr)
import collections
import ctypes
import re
import numpy as np
from numba.core import errors, types
from numba.core.typing.templates import signature
# re-export
from numba.core.cgutils import is_nonelike # noqa: F401
numpy_version = tuple(map(int, np.__version__.split('.')[:2]))
FROM_DTYPE = {
np.dtype('bool'): types.boolean,
np.dtype('int8'): types.int8,
np.dtype('int16'): types.int16,
np.dtype('int32'): types.int32,
np.dtype('int64'): types.int64,
np.dtype('uint8'): types.uint8,
np.dtype('uint16'): types.uint16,
np.dtype('uint32'): types.uint32,
np.dtype('uint64'): types.uint64,
np.dtype('float32'): types.float32,
np.dtype('float64'): types.float64,
np.dtype('complex64'): types.complex64,
np.dtype('complex128'): types.complex128,
np.dtype(object): types.pyobject,
}
re_typestr = re.compile(r'[<>=\|]([a-z])(\d+)?$', re.I)
re_datetimestr = re.compile(r'[<>=\|]([mM])8?(\[([a-z]+)\])?$', re.I)
sizeof_unicode_char = np.dtype('U1').itemsize
def _from_str_dtype(dtype):
m = re_typestr.match(dtype.str)
if not m:
raise NotImplementedError(dtype)
groups = m.groups()
typecode = groups[0]
if typecode == 'U':
# unicode
if dtype.byteorder not in '=|':
raise NotImplementedError("Does not support non-native "
"byteorder")
count = dtype.itemsize // sizeof_unicode_char
assert count == int(groups[1]), "Unicode char size mismatch"
return types.UnicodeCharSeq(count)
elif typecode == 'S':
# char
count = dtype.itemsize
assert count == int(groups[1]), "Char size mismatch"
return types.CharSeq(count)
else:
raise NotImplementedError(dtype)
def _from_datetime_dtype(dtype):
m = re_datetimestr.match(dtype.str)
if not m:
raise NotImplementedError(dtype)
groups = m.groups()
typecode = groups[0]
unit = groups[2] or ''
if typecode == 'm':
return types.NPTimedelta(unit)
elif typecode == 'M':
return types.NPDatetime(unit)
else:
raise NotImplementedError(dtype)
def from_dtype(dtype):
"""
Return a Numba Type instance corresponding to the given Numpy *dtype*.
NotImplementedError is raised on unsupported Numpy dtypes.
"""
if type(dtype) == type and issubclass(dtype, np.generic):
dtype = np.dtype(dtype)
elif getattr(dtype, "fields", None) is not None:
return from_struct_dtype(dtype)
try:
return FROM_DTYPE[dtype]
except KeyError:
pass
try:
char = dtype.char
except AttributeError:
pass
else:
if char in 'SU':
return _from_str_dtype(dtype)
if char in 'mM':
return _from_datetime_dtype(dtype)
if char in 'V' and dtype.subdtype is not None:
subtype = from_dtype(dtype.subdtype[0])
return types.NestedArray(subtype, dtype.shape)
raise NotImplementedError(dtype)
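# A hedged, illustrative sketch (not part of Numba) of the dtype -> Numba type
# mapping performed by from_dtype(); it is never called from this module.
def _example_from_dtype():
    assert from_dtype(np.dtype('float64')) == types.float64
    assert from_dtype(np.dtype('m8[s]')) == types.NPTimedelta('s')
    assert from_dtype(np.dtype('S10')) == types.CharSeq(10)
    # Sub-array dtypes become NestedArray types.
    return from_dtype(np.dtype(('int32', (2, 3))))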
_as_dtype_letters = {
types.NPDatetime: 'M8',
types.NPTimedelta: 'm8',
types.CharSeq: 'S',
types.UnicodeCharSeq: 'U',
}
def as_dtype(nbtype):
"""
Return a numpy dtype instance corresponding to the given Numba type.
    NotImplementedError is raised if no correspondence is known.
"""
nbtype = types.unliteral(nbtype)
if isinstance(nbtype, (types.Complex, types.Integer, types.Float)):
return np.dtype(str(nbtype))
if nbtype is types.bool_:
return np.dtype('?')
if isinstance(nbtype, (types.NPDatetime, types.NPTimedelta)):
letter = _as_dtype_letters[type(nbtype)]
if nbtype.unit:
return np.dtype('%s[%s]' % (letter, nbtype.unit))
else:
return np.dtype(letter)
if isinstance(nbtype, (types.CharSeq, types.UnicodeCharSeq)):
letter = _as_dtype_letters[type(nbtype)]
return np.dtype('%s%d' % (letter, nbtype.count))
if isinstance(nbtype, types.Record):
return as_struct_dtype(nbtype)
if isinstance(nbtype, types.EnumMember):
return as_dtype(nbtype.dtype)
if isinstance(nbtype, types.npytypes.DType):
return as_dtype(nbtype.dtype)
if isinstance(nbtype, types.NumberClass):
return as_dtype(nbtype.dtype)
if isinstance(nbtype, types.NestedArray):
spec = (as_dtype(nbtype.dtype), tuple(nbtype.shape))
return np.dtype(spec)
if isinstance(nbtype, types.PyObject):
return np.dtype(object)
raise NotImplementedError("%r cannot be represented as a Numpy dtype"
% (nbtype,))
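# A hedged sketch (not part of Numba) of the inverse direction handled by
# as_dtype(), including the unit-aware datetime64/timedelta64 cases.
def _example_as_dtype():
    assert as_dtype(types.float32) == np.dtype('float32')
    assert as_dtype(types.NPDatetime('ns')) == np.dtype('M8[ns]')
    assert as_dtype(types.UnicodeCharSeq(5)) == np.dtype('U5')
    return as_dtype(types.boolean)   # -> np.dtype('?')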
def as_struct_dtype(rec):
"""Convert Numba Record type to NumPy structured dtype
"""
assert isinstance(rec, types.Record)
names = []
formats = []
offsets = []
titles = []
# Fill the fields if they are not a title.
for k, t in rec.members:
if not rec.is_title(k):
names.append(k)
formats.append(as_dtype(t))
offsets.append(rec.offset(k))
titles.append(rec.fields[k].title)
fields = {
'names': names,
'formats': formats,
'offsets': offsets,
'itemsize': rec.size,
'titles': titles,
}
_check_struct_alignment(rec, fields)
return np.dtype(fields, align=rec.aligned)
def _check_struct_alignment(rec, fields):
"""Check alignment compatibility with Numpy"""
if rec.aligned:
for k, dt in zip(fields['names'], fields['formats']):
llvm_align = rec.alignof(k)
npy_align = dt.alignment
if llvm_align is not None and npy_align != llvm_align:
msg = (
'NumPy is using a different alignment ({}) '
'than Numba/LLVM ({}) for {}. '
'This is likely a NumPy bug.'
)
raise ValueError(msg.format(npy_align, llvm_align, dt))
def map_arrayscalar_type(val):
if isinstance(val, np.generic):
# We can't blindly call np.dtype() as it loses information
# on some types, e.g. datetime64 and timedelta64.
dtype = val.dtype
else:
try:
dtype = np.dtype(type(val))
except TypeError:
raise NotImplementedError("no corresponding numpy dtype "
"for %r" % type(val))
return from_dtype(dtype)
def is_array(val):
return isinstance(val, np.ndarray)
def map_layout(val):
if val.flags['C_CONTIGUOUS']:
layout = 'C'
elif val.flags['F_CONTIGUOUS']:
layout = 'F'
else:
layout = 'A'
return layout
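# Hedged sketch (not part of Numba): the layout codes map_layout() reports for
# contiguous, transposed and strided views of the same array.
def _example_map_layout():
    a = np.zeros((3, 4))
    assert map_layout(a) == 'C'          # C-contiguous
    assert map_layout(a.T) == 'F'        # Fortran-contiguous view
    assert map_layout(a[:, ::2]) == 'A'  # neither -> 'A' ("any" layout)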
def select_array_wrapper(inputs):
"""
Given the array-compatible input types to an operation (e.g. ufunc),
select the appropriate input for wrapping the operation output,
according to each input's __array_priority__.
An index into *inputs* is returned.
"""
max_prio = float('-inf')
selected_index = None
for index, ty in enumerate(inputs):
# Ties are broken by choosing the first winner, as in Numpy
if (isinstance(ty, types.ArrayCompatible) and
ty.array_priority > max_prio):
selected_index = index
max_prio = ty.array_priority
assert selected_index is not None
return selected_index
def resolve_output_type(context, inputs, formal_output):
"""
Given the array-compatible input types to an operation (e.g. ufunc),
and the operation's formal output type (a types.Array instance),
resolve the actual output type using the typing *context*.
This uses a mechanism compatible with Numpy's __array_priority__ /
__array_wrap__.
"""
selected_input = inputs[select_array_wrapper(inputs)]
args = selected_input, formal_output
sig = context.resolve_function_type('__array_wrap__', args, {})
if sig is None:
if selected_input.array_priority == types.Array.array_priority:
# If it's the same priority as a regular array, assume we
# should return the output unchanged.
# (we can't define __array_wrap__ explicitly for types.Buffer,
# as that would be inherited by most array-compatible objects)
return formal_output
raise errors.TypingError("__array_wrap__ failed for %s" % (args,))
return sig.return_type
def supported_ufunc_loop(ufunc, loop):
"""Return whether the *loop* for the *ufunc* is supported -in nopython-.
*loop* should be a UFuncLoopSpec instance, and *ufunc* a numpy ufunc.
For ufuncs implemented using the ufunc_db, it is supported if the ufunc_db
contains a lowering definition for 'loop' in the 'ufunc' entry.
For other ufuncs, it is type based. The loop will be considered valid if it
only contains the following letter types: '?bBhHiIlLqQfd'. Note this is
legacy and when implementing new ufuncs the ufunc_db should be preferred,
as it allows for a more fine-grained incremental support.
"""
# NOTE: Assuming ufunc for the CPUContext
from numba.np import ufunc_db
loop_sig = loop.ufunc_sig
try:
# check if the loop has a codegen description in the
# ufunc_db. If so, we can proceed.
# note that as of now not all ufuncs have an entry in the
# ufunc_db
supported_loop = loop_sig in ufunc_db.get_ufunc_info(ufunc)
except KeyError:
# for ufuncs not in ufunc_db, base the decision of whether the
# loop is supported on its types
loop_types = [x.char for x in loop.numpy_inputs + loop.numpy_outputs]
supported_types = '?bBhHiIlLqQfd'
# check if all the types involved in the ufunc loop are
# supported in this mode
supported_loop = all(t in supported_types for t in loop_types)
return supported_loop
class UFuncLoopSpec(collections.namedtuple('_UFuncLoopSpec',
('inputs', 'outputs', 'ufunc_sig'))):
"""
An object describing a ufunc loop's inner types. Properties:
- inputs: the inputs' Numba types
- outputs: the outputs' Numba types
- ufunc_sig: the string representing the ufunc's type signature, in
Numpy format (e.g. "ii->i")
"""
__slots__ = ()
@property
def numpy_inputs(self):
return [as_dtype(x) for x in self.inputs]
@property
def numpy_outputs(self):
return [as_dtype(x) for x in self.outputs]
def _ufunc_loop_sig(out_tys, in_tys):
if len(out_tys) == 1:
return signature(out_tys[0], *in_tys)
else:
return signature(types.Tuple(out_tys), *in_tys)
def ufunc_can_cast(from_, to, has_mixed_inputs, casting='safe'):
"""
A variant of np.can_cast() that can allow casting any integer to
any real or complex type, in case the operation has mixed-kind
inputs.
For example we want `np.power(float32, int32)` to be computed using
SP arithmetic and return `float32`.
However, `np.sqrt(int32)` should use DP arithmetic and return `float64`.
"""
from_ = np.dtype(from_)
to = np.dtype(to)
if has_mixed_inputs and from_.kind in 'iu' and to.kind in 'cf':
# Decide that all integers can cast to any real or complex type.
return True
return np.can_cast(from_, to, casting)
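# Hedged sketch (not part of Numba) of the relaxation described in the
# docstring above: with mixed-kind inputs, integers may cast to any real or
# complex type even though NumPy's plain 'safe' rule would reject it.
def _example_ufunc_can_cast():
    assert ufunc_can_cast('i4', 'f4', has_mixed_inputs=True)
    assert not ufunc_can_cast('i4', 'f4', has_mixed_inputs=False)
    assert not np.can_cast('i4', 'f4')   # NumPy's default 'safe' casting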
def ufunc_find_matching_loop(ufunc, arg_types):
"""Find the appropriate loop to be used for a ufunc based on the types
of the operands
ufunc - The ufunc we want to check
arg_types - The tuple of arguments to the ufunc, including any
explicit output(s).
return value - A UFuncLoopSpec identifying the loop, or None
if no matching loop is found.
"""
# Separate logical input from explicit output arguments
input_types = arg_types[:ufunc.nin]
output_types = arg_types[ufunc.nin:]
assert(len(input_types) == ufunc.nin)
try:
np_input_types = [as_dtype(x) for x in input_types]
except NotImplementedError:
return None
try:
np_output_types = [as_dtype(x) for x in output_types]
except NotImplementedError:
return None
# Whether the inputs are mixed integer / floating-point
has_mixed_inputs = (
any(dt.kind in 'iu' for dt in np_input_types) and
any(dt.kind in 'cf' for dt in np_input_types))
def choose_types(numba_types, ufunc_letters):
"""
Return a list of Numba types representing *ufunc_letters*,
except when the letter designates a datetime64 or timedelta64,
in which case the type is taken from *numba_types*.
"""
assert len(ufunc_letters) >= len(numba_types)
types = [tp if letter in 'mM' else from_dtype(np.dtype(letter))
for tp, letter in zip(numba_types, ufunc_letters)]
# Add missing types (presumably implicit outputs)
types += [from_dtype(np.dtype(letter))
for letter in ufunc_letters[len(numba_types):]]
return types
def set_output_dt_units(inputs, outputs, ufunc_inputs):
"""
Sets the output unit of a datetime type based on the input units
        Timedelta is a special dtype that requires the time unit to be
        specified (day, month, etc.). Not every operation with timedelta inputs
        leads to a timedelta output. However, for those that do, the output
        unit must be inferred from the units of the inputs.
At the moment this function takes care of two cases:
a) where all inputs are timedelta with the same unit (mm), and
therefore the output has the same unit.
This case is used for arr.sum, and for arr1+arr2 where all arrays
are timedeltas.
If in the future this needs to be extended to a case with mixed units,
the rules should be implemented in `npdatetime_helpers` and called
from this function to set the correct output unit.
        b) where the left operand is a timedelta, i.e. the "m?" case. This case
        is used for division, e.g. timedelta / int.
At the time of writing, Numba does not support addition of timedelta
and other types, so this function does not consider the case "?m",
i.e. where timedelta is the right operand to a non-timedelta left
operand. To extend it in the future, just add another elif clause.
"""
def make_specific(outputs, unit):
new_outputs = []
for out in outputs:
if isinstance(out, types.NPTimedelta) and out.unit == "":
new_outputs.append(types.NPTimedelta(unit))
else:
new_outputs.append(out)
return new_outputs
if ufunc_inputs == 'mm':
if all(inp.unit == inputs[0].unit for inp in inputs):
                # Case with operation on the same units. Operations on
                # different units are not handled for now but might need to be
                # added in the future.
unit = inputs[0].unit
new_outputs = make_specific(outputs, unit)
else:
return outputs
return new_outputs
elif ufunc_inputs[0] == 'm':
# case where the left operand has timedelta type
unit = inputs[0].unit
new_outputs = make_specific(outputs, unit)
return new_outputs
    # In NumPy, the loops are evaluated from first to last. The first one
    # that is viable is the one used. One loop is viable if it is possible
    # to cast every input operand to the type expected by the ufunc.
    # Also, under NumPy 1.10+ the output must be castable back
    # to a close enough type ("same_kind").
for candidate in ufunc.types:
ufunc_inputs = candidate[:ufunc.nin]
ufunc_outputs = candidate[-ufunc.nout:] if ufunc.nout else []
if 'O' in ufunc_inputs:
# Skip object arrays
continue
found = True
# Skip if any input or output argument is mismatching
for outer, inner in zip(np_input_types, ufunc_inputs):
# (outer is a dtype instance, inner is a type char)
if outer.char in 'mM' or inner in 'mM':
# For datetime64 and timedelta64, we want to retain
# precise typing (i.e. the units); therefore we look for
# an exact match.
if outer.char != inner:
found = False
break
elif not ufunc_can_cast(outer.char, inner,
has_mixed_inputs, 'safe'):
found = False
break
if found:
# Can we cast the inner result to the outer result type?
for outer, inner in zip(np_output_types, ufunc_outputs):
if (outer.char not in 'mM' and not
ufunc_can_cast(inner, outer.char,
has_mixed_inputs, 'same_kind')):
found = False
break
if found:
# Found: determine the Numba types for the loop's inputs and
# outputs.
try:
inputs = choose_types(input_types, ufunc_inputs)
outputs = choose_types(output_types, ufunc_outputs)
# if the left operand or both are timedeltas, then the output
# units need to be determined.
if ufunc_inputs[0] == 'm':
outputs = set_output_dt_units(inputs, outputs, ufunc_inputs)
except NotImplementedError:
# One of the selected dtypes isn't supported by Numba
# (e.g. float16), try other candidates
continue
else:
return UFuncLoopSpec(inputs, outputs, candidate)
return None
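# A minimal illustrative sketch (not part of the original module), assuming NumPy
# registers its usual integer loops for np.add: two int64 inputs select an all-int64
# loop, so the returned UFuncLoopSpec carries int64 Numba types throughout.
def _example_find_matching_loop():
    loop = ufunc_find_matching_loop(np.add, (types.int64, types.int64))
    assert loop is not None
    assert loop.inputs == [types.int64, types.int64]
    assert loop.outputs == [types.int64]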
def _is_aligned_struct(struct):
return struct.isalignedstruct
def from_struct_dtype(dtype):
"""Convert a NumPy structured dtype to Numba Record type
"""
if dtype.hasobject:
raise TypeError("Do not support dtype containing object")
fields = []
for name, info in dtype.fields.items():
        # *info* may have 3 elements
        elemdtype, offset = info[:2]
title = info[2] if len(info) == 3 else None
ty = from_dtype(elemdtype)
infos = {
'type': ty,
'offset': offset,
'title': title,
}
fields.append((name, infos))
# Note: dtype.alignment is not consistent.
# It is different after passing into a recarray.
# recarray(N, dtype=mydtype).dtype.alignment != mydtype.alignment
size = dtype.itemsize
aligned = _is_aligned_struct(dtype)
return types.Record(fields, size, aligned)
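# A minimal illustrative sketch (not part of the original module): converting a small
# aligned structured dtype into a Numba Record type. The field names are arbitrary.
def _example_from_struct_dtype():
    dt = np.dtype([('x', np.int32), ('y', np.float64)], align=True)
    rec = from_struct_dtype(dt)
    assert isinstance(rec, types.Record)
    assert _is_aligned_struct(dt)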
def _get_bytes_buffer(ptr, nbytes):
"""
Get a ctypes array of *nbytes* starting at *ptr*.
"""
if isinstance(ptr, ctypes.c_void_p):
ptr = ptr.value
arrty = ctypes.c_byte * nbytes
return arrty.from_address(ptr)
def _get_array_from_ptr(ptr, nbytes, dtype):
return np.frombuffer(_get_bytes_buffer(ptr, nbytes), dtype)
def carray(ptr, shape, dtype=None):
"""
Return a Numpy array view over the data pointed to by *ptr* with the
given *shape*, in C order. If *dtype* is given, it is used as the
array's dtype, otherwise the array's dtype is inferred from *ptr*'s type.
"""
from numba.core.typing.ctypes_utils import from_ctypes
try:
# Use ctypes parameter protocol if available
ptr = ptr._as_parameter_
except AttributeError:
pass
# Normalize dtype, to accept e.g. "int64" or np.int64
if dtype is not None:
dtype = np.dtype(dtype)
if isinstance(ptr, ctypes.c_void_p):
if dtype is None:
raise TypeError("explicit dtype required for void* argument")
p = ptr
elif isinstance(ptr, ctypes._Pointer):
ptrty = from_ctypes(ptr.__class__)
assert isinstance(ptrty, types.CPointer)
ptr_dtype = as_dtype(ptrty.dtype)
if dtype is not None and dtype != ptr_dtype:
raise TypeError("mismatching dtype '%s' for pointer %s"
% (dtype, ptr))
dtype = ptr_dtype
p = ctypes.cast(ptr, ctypes.c_void_p)
else:
raise TypeError("expected a ctypes pointer, got %r" % (ptr,))
nbytes = dtype.itemsize * np.product(shape, dtype=np.intp)
return _get_array_from_ptr(p, nbytes, dtype).reshape(shape)
def farray(ptr, shape, dtype=None):
"""
Return a Numpy array view over the data pointed to by *ptr* with the
given *shape*, in Fortran order. If *dtype* is given, it is used as the
array's dtype, otherwise the array's dtype is inferred from *ptr*'s type.
"""
if not isinstance(shape, int):
shape = shape[::-1]
return carray(ptr, shape, dtype).T
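# A minimal illustrative sketch (not part of the original module): viewing the same
# ctypes buffer through carray() (C order) and farray() (Fortran order). The buffer
# contents are arbitrary example data.
def _example_carray_farray():
    buf = (ctypes.c_double * 6)(*range(6))
    ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_double))
    c_view = carray(ptr, (2, 3))   # dtype inferred from the pointer type
    f_view = farray(ptr, (2, 3))   # same memory, column-major interpretation
    assert c_view[0, 1] == 1.0 and f_view[0, 1] == 2.0
    assert np.shares_memory(c_view, f_view)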
def is_contiguous(dims, strides, itemsize):
"""Is the given shape, strides, and itemsize of C layout?
Note: The code is usable as a numba-compiled function
"""
nd = len(dims)
# Check and skip 1s or 0s in inner dims
innerax = nd - 1
while innerax > -1 and dims[innerax] <= 1:
innerax -= 1
    # Early exit if all axes are 1s or 0s
if innerax < 0:
return True
# Check itemsize matches innermost stride
if itemsize != strides[innerax]:
return False
# Check and skip 1s or 0s in outer dims
outerax = 0
while outerax < innerax and dims[outerax] <= 1:
outerax += 1
# Check remaining strides to be contiguous
ax = innerax
while ax > outerax:
if strides[ax] * dims[ax] != strides[ax - 1]:
return False
ax -= 1
return True
def is_fortran(dims, strides, itemsize):
"""Is the given shape, strides, and itemsize of F layout?
Note: The code is usable as a numba-compiled function
"""
nd = len(dims)
# Check and skip 1s or 0s in inner dims
firstax = 0
while firstax < nd and dims[firstax] <= 1:
firstax += 1
    # Early exit if all axes are 1s or 0s
if firstax >= nd:
return True
# Check itemsize matches innermost stride
if itemsize != strides[firstax]:
return False
# Check and skip 1s or 0s in outer dims
lastax = nd - 1
while lastax > firstax and dims[lastax] <= 1:
lastax -= 1
# Check remaining strides to be contiguous
ax = firstax
while ax < lastax:
if strides[ax] * dims[ax] != strides[ax + 1]:
return False
ax += 1
return True
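# A minimal illustrative sketch (not part of the original module): for a 3x4 float64
# array (itemsize 8), C-order strides are (32, 8) and Fortran-order strides are
# (8, 24); the helpers above classify those layouts accordingly.
def _example_layout_checks():
    dims, itemsize = (3, 4), 8
    assert is_contiguous(dims, (32, 8), itemsize)
    assert not is_fortran(dims, (32, 8), itemsize)
    assert is_fortran(dims, (8, 24), itemsize)
    assert not is_contiguous(dims, (8, 24), itemsize)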
def type_can_asarray(arr):
""" Returns True if the type of 'arr' is supported by the Numba `np.asarray`
implementation, False otherwise.
"""
ok = (types.Array, types.Sequence, types.Tuple, types.StringLiteral,
types.Number, types.Boolean, types.containers.ListType)
return isinstance(arr, ok)
|
|
# Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, FindInMap, Ref
from .validators import boolean, integer, integer_range, network_port
class Tag(AWSHelperFn):
def __init__(self, key, value):
self.data = {'Key': key, 'Value': value}
def JSONrepr(self):
return self.data
class CustomerGateway(AWSObject):
resource_type = "AWS::EC2::CustomerGateway"
props = {
'BgpAsn': (integer, True),
'IpAddress': (basestring, True),
'Tags': (list, False),
'Type': (basestring, True),
}
class DHCPOptions(AWSObject):
resource_type = "AWS::EC2::DHCPOptions"
props = {
'DomainName': (basestring, False),
'DomainNameServers': (list, False),
'NetbiosNameServers': (list, False),
'NetbiosNodeType': (integer, False),
'NtpServers': (list, False),
'Tags': (list, False),
}
class EIP(AWSObject):
resource_type = "AWS::EC2::EIP"
props = {
'InstanceId': (basestring, False),
'Domain': (basestring, False),
}
class EIPAssociation(AWSObject):
resource_type = "AWS::EC2::EIPAssociation"
props = {
'AllocationId': (basestring, False),
'EIP': (basestring, False),
'InstanceId': (basestring, False),
'NetworkInterfaceId': (basestring, False),
'PrivateIpAddress': (basestring, False),
}
class EBSBlockDevice(AWSProperty):
props = {
'DeleteOnTermination': (boolean, False),
'Encrypted': (boolean, False),
'Iops': (integer, False), # Conditional
'SnapshotId': (basestring, False), # Conditional
'VolumeSize': (integer, False), # Conditional
'VolumeType': (basestring, False),
}
class BlockDeviceMapping(AWSProperty):
props = {
'DeviceName': (basestring, True),
'Ebs': (EBSBlockDevice, False), # Conditional
'NoDevice': (dict, False),
'VirtualName': (basestring, False), # Conditional
}
class MountPoint(AWSProperty):
props = {
'Device': (basestring, True),
'VolumeId': (basestring, True),
}
class PrivateIpAddressSpecification(AWSProperty):
props = {
'Primary': (boolean, True),
'PrivateIpAddress': (basestring, True),
}
class NetworkInterfaceProperty(AWSProperty):
props = {
'AssociatePublicIpAddress': (boolean, False),
'DeleteOnTermination': (boolean, False),
'Description': (basestring, False),
'DeviceIndex': (integer, True),
'GroupSet': ([basestring, FindInMap, Ref], False),
'NetworkInterfaceId': (basestring, False),
'PrivateIpAddress': (basestring, False),
'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),
'SecondaryPrivateIpAddressCount': (integer, False),
'SubnetId': (basestring, False),
}
class Instance(AWSObject):
resource_type = "AWS::EC2::Instance"
props = {
'AvailabilityZone': (basestring, False),
'BlockDeviceMappings': (list, False),
'DisableApiTermination': (boolean, False),
'EbsOptimized': (boolean, False),
'IamInstanceProfile': (basestring, False),
'ImageId': (basestring, True),
'InstanceInitiatedShutdownBehavior': (basestring, False),
'InstanceType': (basestring, False),
'KernelId': (basestring, False),
'KeyName': (basestring, False),
'Monitoring': (boolean, False),
'NetworkInterfaces': ([NetworkInterfaceProperty], False),
'PlacementGroupName': (basestring, False),
'PrivateIpAddress': (basestring, False),
'RamdiskId': (basestring, False),
'SecurityGroupIds': (list, False),
'SecurityGroups': (list, False),
'SourceDestCheck': (boolean, False),
'SubnetId': (basestring, False),
'Tags': (list, False),
'Tenancy': (basestring, False),
'UserData': (basestring, False),
'Volumes': (list, False),
}
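# A minimal illustrative sketch (not part of the original module) of how these
# resource classes are typically used: keyword arguments map to the keys declared in
# ``props``, and required properties are checked when the template is serialized.
# The AMI id, key name, and tag value below are placeholders.
def _example_instance_template():
    from troposphere import Template
    t = Template()
    t.add_resource(Instance(
        'WebServer',
        ImageId='ami-12345678',
        InstanceType='t2.micro',
        KeyName='my-key',
        Tags=[Tag('Name', 'web-server')],
    ))
    return t.to_json()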
class InternetGateway(AWSObject):
resource_type = "AWS::EC2::InternetGateway"
props = {
'Tags': (list, False),
}
class NetworkAcl(AWSObject):
resource_type = "AWS::EC2::NetworkAcl"
props = {
'Tags': (list, False),
'VpcId': (basestring, True),
}
class ICMP(AWSProperty):
props = {
'Code': (integer, False),
'Type': (integer, False),
}
class PortRange(AWSProperty):
props = {
'From': (network_port, False),
'To': (network_port, False),
}
class NetworkAclEntry(AWSObject):
resource_type = "AWS::EC2::NetworkAclEntry"
props = {
'CidrBlock': (basestring, True),
'Egress': (boolean, True),
'Icmp': (ICMP, False), # Conditional
'NetworkAclId': (basestring, True),
'PortRange': (PortRange, False), # Conditional
'Protocol': (network_port, True),
'RuleAction': (basestring, True),
'RuleNumber': (integer_range(1, 32766), True),
}
class NetworkInterface(AWSObject):
resource_type = "AWS::EC2::NetworkInterface"
props = {
'Description': (basestring, False),
'GroupSet': (list, False),
'PrivateIpAddress': (basestring, False),
'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),
'SecondaryPrivateIpAddressCount': (integer, False),
'SourceDestCheck': (boolean, False),
'SubnetId': (basestring, True),
'Tags': (list, False),
}
class NetworkInterfaceAttachment(AWSObject):
resource_type = "AWS::EC2::NetworkInterfaceAttachment"
props = {
'DeleteOnTermination': (boolean, False),
'DeviceIndex': (integer, True),
'InstanceId': (basestring, True),
'NetworkInterfaceId': (basestring, True),
}
class Route(AWSObject):
resource_type = "AWS::EC2::Route"
props = {
'DestinationCidrBlock': (basestring, True),
'GatewayId': (basestring, False),
'InstanceId': (basestring, False),
'NetworkInterfaceId': (basestring, False),
'RouteTableId': (basestring, True),
'VpcPeeringConnectionId': (basestring, False),
}
class RouteTable(AWSObject):
resource_type = "AWS::EC2::RouteTable"
props = {
'Tags': (list, False),
'VpcId': (basestring, True),
}
class SecurityGroupEgress(AWSObject):
resource_type = "AWS::EC2::SecurityGroupEgress"
props = {
'CidrIp': (basestring, False),
'DestinationSecurityGroupId': (basestring, False),
'FromPort': (network_port, True),
'GroupId': (basestring, True),
'IpProtocol': (basestring, True),
'ToPort': (network_port, True),
#
# Workaround for a bug in CloudFormation and EC2 where the
# DestinationSecurityGroupId property is ignored causing
# egress rules targeting a security group to be ignored.
# Using SourceSecurityGroupId instead works fine even in
# egress rules. AWS have known about this bug for a while.
#
'SourceSecurityGroupId': (basestring, False),
}
class SecurityGroupIngress(AWSObject):
resource_type = "AWS::EC2::SecurityGroupIngress"
props = {
'CidrIp': (basestring, False),
'FromPort': (network_port, False),
'GroupName': (basestring, False),
'GroupId': (basestring, False),
'IpProtocol': (basestring, True),
'SourceSecurityGroupName': (basestring, False),
'SourceSecurityGroupId': (basestring, False),
'SourceSecurityGroupOwnerId': (basestring, False),
'ToPort': (network_port, False),
}
class SecurityGroupRule(AWSProperty):
props = {
'CidrIp': (basestring, False),
'FromPort': (network_port, True),
'IpProtocol': (basestring, True),
'SourceSecurityGroupId': (basestring, False),
'SourceSecurityGroupName': (basestring, False),
'SourceSecurityGroupOwnerId': (basestring, False),
'ToPort': (network_port, True),
'DestinationSecurityGroupId': (basestring, False),
}
class SecurityGroup(AWSObject):
resource_type = "AWS::EC2::SecurityGroup"
props = {
'GroupDescription': (basestring, True),
'SecurityGroupEgress': (list, False),
'SecurityGroupIngress': (list, False),
'VpcId': (basestring, False),
'Tags': (list, False),
}
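# A minimal illustrative sketch (not part of the original module): an inline ingress
# rule built with SecurityGroupRule and attached to a SecurityGroup. The CIDR and
# description are placeholder values.
def _example_security_group():
    return SecurityGroup(
        'SshAccess',
        GroupDescription='Allow inbound SSH',
        SecurityGroupIngress=[
            SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp='0.0.0.0/0',
            ),
        ],
    )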
class Subnet(AWSObject):
resource_type = "AWS::EC2::Subnet"
props = {
'AvailabilityZone': (basestring, False),
'CidrBlock': (basestring, True),
'MapPublicIpOnLaunch': (boolean, False),
'Tags': (list, False),
'VpcId': (basestring, True),
}
class SubnetNetworkAclAssociation(AWSObject):
resource_type = "AWS::EC2::SubnetNetworkAclAssociation"
props = {
'SubnetId': (basestring, True),
'NetworkAclId': (basestring, True),
}
class SubnetRouteTableAssociation(AWSObject):
resource_type = "AWS::EC2::SubnetRouteTableAssociation"
props = {
'RouteTableId': (basestring, True),
'SubnetId': (basestring, True),
}
class Volume(AWSObject):
resource_type = "AWS::EC2::Volume"
props = {
'AvailabilityZone': (basestring, True),
'Encrypted': (boolean, False),
'Iops': (integer, False),
'KmsKeyId': (basestring, False),
'Size': (basestring, False),
'SnapshotId': (basestring, False),
'Tags': (list, False),
'VolumeType': (basestring, False),
}
class VolumeAttachment(AWSObject):
resource_type = "AWS::EC2::VolumeAttachment"
props = {
'Device': (basestring, True),
'InstanceId': (basestring, True),
'VolumeId': (basestring, True),
}
class VPC(AWSObject):
resource_type = "AWS::EC2::VPC"
props = {
'CidrBlock': (basestring, True),
'EnableDnsSupport': (boolean, False),
'EnableDnsHostnames': (boolean, False),
'InstanceTenancy': (basestring, False),
'Tags': (list, False),
}
class VPCDHCPOptionsAssociation(AWSObject):
resource_type = "AWS::EC2::VPCDHCPOptionsAssociation"
props = {
'DhcpOptionsId': (basestring, True),
'VpcId': (basestring, True),
}
class VPCGatewayAttachment(AWSObject):
resource_type = "AWS::EC2::VPCGatewayAttachment"
props = {
'InternetGatewayId': (basestring, False),
'VpcId': (basestring, True),
'VpnGatewayId': (basestring, False),
}
class VPNConnection(AWSObject):
resource_type = "AWS::EC2::VPNConnection"
props = {
'Type': (basestring, True),
'CustomerGatewayId': (basestring, True),
'StaticRoutesOnly': (boolean, False),
'Tags': (list, False),
'VpnGatewayId': (basestring, True),
}
class VPNConnectionRoute(AWSObject):
resource_type = "AWS::EC2::VPNConnectionRoute"
props = {
'DestinationCidrBlock': (basestring, True),
'VpnConnectionId': (basestring, True),
}
class VPNGateway(AWSObject):
resource_type = "AWS::EC2::VPNGateway"
props = {
'Type': (basestring, True),
'Tags': (list, False),
}
class VPNGatewayRoutePropagation(AWSObject):
resource_type = "AWS::EC2::VPNGatewayRoutePropagation"
props = {
'RouteTableIds': ([basestring, Ref], True),
'VpnGatewayId': (basestring, True),
}
class VPCPeeringConnection(AWSObject):
resource_type = "AWS::EC2::VPCPeeringConnection"
props = {
'PeerVpcId': (basestring, True),
'VpcId': (basestring, True),
'Tags': (list, False),
}
|
|
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps a persistent store of all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where the outcome can be determined from the local persistent store
alone.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
The external REST API used by the proxy is the same API as defined for neutron
(JSON subset), with some additional parameters (gateway on network-create and
macaddr on port-attach) and an additional PUT to do a bulk dump of all
persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from oslo import messaging
from oslo.utils import importutils
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron import manager
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
from neutron.plugins.common import constants as pconst
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(sg_rpc.SecurityGroupAgentRpcApiMixin):
def __init__(self, topic):
self.topic = topic
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def port_update(self, context, port):
topic_port_update = topics.get_topic_name(self.client.target.topic,
topics.PORT, topics.UPDATE)
cctxt = self.client.prepare(fanout=True, topic=topic_port_update)
cctxt.cast(context, 'port_update', port=port)
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
def get_port_from_device(self, device):
port_id = re.sub(r"^%s" % const.TAP_DEVICE_PREFIX, "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug("get_port_and_sgs() called for port_id %s", port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
@property
def l3_plugin(self):
return manager.NeutronManager.get_service_plugins().get(
pconst.L3_ROUTER_NAT)
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers and self.l3_plugin:
routers = []
all_routers = self.l3_plugin.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
        This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
if self.l3_plugin:
fl_ips = self.l3_plugin.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_LW("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_LW("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_LW("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
elif 'id' in port:
hostid = porttracker_db.get_port_hostid(context, port['id'])
else:
hostid = None
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
sg_enabled = sg_rpc.is_firewall_enabled()
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: sg_enabled
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_LE("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_LE("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
            # would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
# core plugin: context is top level object
# ml2: keeps context in _plugin_context
self.servers.set_context(getattr(context, '_plugin_context', context))
return f(self, context, *args, **kwargs)
return wrapper
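# A minimal illustrative sketch (not part of the original module) of what the
# decorator above does: before the wrapped plugin method runs, the request context
# (or its wrapped _plugin_context, when present) is handed to the server pool.
# _StubServerPool and _StubPlugin are hypothetical stand-ins for illustration only.
def _example_put_context_in_serverpool():
    class _StubServerPool(object):
        def __init__(self):
            self.context = None
        def set_context(self, context):
            self.context = context
    class _StubPlugin(object):
        def __init__(self):
            self.servers = _StubServerPool()
        @put_context_in_serverpool
        def get_thing(self, context):
            return context
    plugin = _StubPlugin()
    ctx = object()
    assert plugin.get_thing(ctx) is ctx
    assert plugin.servers.context is ctx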
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "binding",
"extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_LI('NeutronRestProxy: Starting plugin. Version=%s'),
version.version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug("NeutronRestProxyV2: initialization done")
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2: create_network() called")
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2.update_network() called")
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2: delete_network() called")
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach an L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2: create_port() called")
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
            # Non-router port status is set to BUILD (pending); it is then
            # updated after the async REST call completes. Router ports are
            # handled synchronously.
if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
port['port']['status'] = const.PORT_STATUS_ACTIVE
else:
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
# ports have to be created synchronously when creating a router
# port since adding router interfaces is a multi-call process
if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
self.servers.rest_create_port(net["tenant_id"],
new_port["network_id"],
mapped_port)
else:
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2: update_port() called")
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
need_port_update_notify = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, orig_port, new_port)
if need_port_update_notify:
self.notifier.port_update(context, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug("NeutronRestProxyV2: delete_port() called")
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check and self.l3_plugin:
self.l3_plugin.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
if self.l3_plugin:
router_ids = self.l3_plugin.disassociate_floatingips(
context, port_id, do_notify=False)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
if self.l3_plugin:
# now that we've left db transaction, we are safe to notify
self.l3_plugin.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug("NeutronRestProxyV2: create_subnet() called")
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug("NeutronRestProxyV2: update_subnet() called")
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug("NeutronRestProxyV2: delete_subnet() called")
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug("Adding host route: ")
LOG.debug("Destination:%(dst)s nexthop:%(next)s",
{'dst': destination, 'next': nexthop})
|
|
# orm/descriptor_props.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
from . import query
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.\
create_proxied_attribute(self.descriptor)\
(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
def __init__(self, class_, *attrs, **kwargs):
"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a full
usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does not
load immediately, and is instead loaded when the attribute is first
accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
**Deprecated.** Please see :class:`.AttributeEvents`.
"""
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
if 'info' in kwargs:
self.info = kwargs.pop('info')
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key)
for key in self._attribute_keys
]
                # current expected behavior here is that the composite is
                # created on access if the object is persistent or if the
                # column attributes have non-None values. This would be better
                # if the composite were created unconditionally,
                # but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,))
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_class = prop._strategy_lookup(
("deferred", True),
("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
dict_ = state.dict
if self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
#assert self.key not in dict_
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
            defaults. Pop out the composite value here so that it
            is recreated on the next access.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'expire',
expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(), [self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property, expr):
self.property = property
super(CompositeProperty.CompositeBundle, self).__init__(
property.key, *expr)
def create_row_processor(self, query, procs, labels):
def proc(row, result):
return self.property.composite_class(*[proc(row, result) for proc in procs])
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
of usage, as well as the documentation for :class:`.PropComparator`.
See also:
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(group=False, *self._comparable_elements)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(self.prop, self.__clause_element__())
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(
self._adapt_to_entity.entity,
prop.key
) for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b
for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
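def _composite_usage_sketch():
    # Illustrative sketch, not part of the original module: shows how a value
    # class is typically paired with CompositeProperty via orm.composite().
    # The Point/Vertex names below are assumptions for demonstration; the only
    # contract exercised by fset() and get_history() above is
    # __composite_values__().
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import composite

    Base = declarative_base()

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __composite_values__(self):
            return self.x, self.y

        def __eq__(self, other):
            return isinstance(other, Point) and \
                (other.x, other.y) == (self.x, self.y)

        def __ne__(self, other):
            return not self.__eq__(other)

    class Vertex(Base):
        __tablename__ = 'vertices'
        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        start = composite(Point, x1, y1)

    # Comparator.__eq__ above expands this into
    # (vertices.x1 = :param_1) AND (vertices.y1 = :param_2)
    return Vertex.start == Point(3, 4)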
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add this "
"property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
:param name: the name of the existing mapped property. This
can refer to the string name of any :class:`.MapperProperty`
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: if ``True``, the :func:`.synonym` construct will
locate the existing named :class:`.MapperProperty` based on the
attribute name of this :func:`.synonym`, and assign it to a new
attribute linked to the name of this :func:`.synonym`.
That is, given a mapping like::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
job_status = synonym("_job_status", map_column=True)
The above class ``MyClass`` will now have the ``job_status``
:class:`.Column` object mapped to the attribute named ``_job_status``,
and the attribute named ``job_status`` will refer to the synonym
itself. This feature is typically used in conjunction with the
``descriptor`` argument in order to link a user-defined descriptor
as a "wrapper" for an existing column.
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - examples of functionality.
:ref:`mapper_hybrids` - Hybrids provide a better approach for
more complicated attribute-wrapping schemes than synonyms.
"""
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if it's not a column-based property
@util.memoized_property
def _proxied_property(self):
return getattr(self.parent.class_, self.name).property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
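def _synonym_usage_sketch():
    # Illustrative sketch, not part of the original module: a synonym paired
    # with a plain Python property, complementing the map_column example in
    # the docstring above.  MyClass and _job_status are assumed names used
    # only for demonstration.
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import synonym

    Base = declarative_base()

    class MyClass(Base):
        __tablename__ = 'my_table'
        id = Column(Integer, primary_key=True)
        _job_status = Column('job_status', String(50))

        @property
        def job_status(self):
            return 'Status: %s' % self._job_status

        job_status = synonym('_job_status', descriptor=job_status)

    # Instance access goes through the descriptor above, while class-level
    # expressions delegate to the mapped column via _comparator_factory().
    return MyClass.job_status == 'pending'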
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(self, comparator_factory, descriptor=None, doc=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
|
|
import os.path
import numpy as np
import numpy.ma as ma
from itertools import product
from utils import RNG, Stopwatch, print_inline, width_format
from metrics import accuracy_score
class TrainTestSplitter(object):
"""
A generic class for splitting data into (random) subsets.
Parameters
----------
shuffle : bool, optional
Whether to shuffle the data.
random_seed : None or int, optional
Pseudo-random number generator seed used for random sampling.
Examples
--------
>>> import numpy as np
>>> y = np.array([1, 1, 2, 2, 3, 3, 3])
>>> tts1 = TrainTestSplitter(shuffle=False)
>>> train, test = tts1.split(y, train_ratio=0.5)
>>> print y[train], y[test]
[1 1 2] [2 3 3 3]
>>> train, test = tts1.split(y, train_ratio=0.5, stratify=True)
>>> print y[train], y[test]
[1 2 3] [1 2 3 3]
>>> for fold in tts1.make_k_folds(y, n_folds=3):
... print y[fold]
[1 1 2]
[2 3]
[3 3]
>>> for fold in tts1.make_k_folds(y, n_folds=3, stratify=True):
... print y[fold]
[1 2 3]
[1 2 3]
[3]
>>> for train, test in tts1.k_fold_split(y, n_splits=3):
... print y[train], y[test]
[2 3 3 3] [1 1 2]
[1 1 2 3 3] [2 3]
[1 1 2 2 3] [3 3]
>>> for train, test in tts1.k_fold_split(y, n_splits=3, stratify=True):
... print y[train], y[test]
[1 2 3 3] [1 2 3]
[1 2 3 3] [1 2 3]
[1 2 3 1 2 3] [3]
>>> tts2 = TrainTestSplitter(shuffle=True, random_seed=1337)
>>> train, test = tts2.split(y, train_ratio=0.5)
>>> print y[train], y[test]
[3 2 1] [2 1 3 3]
>>> train, test = tts2.split(y, train_ratio=0.5, stratify=True)
>>> print y[train], y[test]
[3 1 2] [3 3 2 1]
>>> for fold in tts2.make_k_folds(y, n_folds=3):
... print y[fold]
[3 2 1]
[2 1]
[3 3]
>>> for fold in tts2.make_k_folds(y, n_folds=3, stratify=True):
... print y[fold]
[3 1 2]
[3 2 1]
[3]
"""
def __init__(self, shuffle=False, random_seed=None):
self.shuffle = shuffle
self.random_seed = random_seed
self.rng = RNG(self.random_seed)
def split(self, y, train_ratio=0.8, stratify=False):
"""
Split data into train and test subsets.
Parameters
----------
y : (n_samples,) array-like
The target variable for supervised learning problems.
train_ratio : float, 0 < `train_ratio` < 1, optional
the proportion of the dataset to include in the train split.
stratify : bool, optional
If True, the folds are made by preserving the percentage of samples
for each class. Stratification is done based upon the `y` labels.
Returns
-------
train : (n_train,) np.ndarray
The training set indices for that split.
test : (n_samples - n_train,) np.ndarray
The testing set indices for that split.
"""
self.rng.reseed()
n = len(y)
if not stratify:
indices = self.rng.permutation(n) if self.shuffle else np.arange(n, dtype=np.int)
train_size = int(train_ratio * n)
return np.split(indices, (train_size,))
# group indices by label
labels_indices = {}
for index, label in enumerate(y):
if not label in labels_indices: labels_indices[label] = []
labels_indices[label].append(index)
train, test = np.array([], dtype=np.int), np.array([], dtype=np.int)
for label, indices in sorted(labels_indices.items()):
size = int(train_ratio * len(indices))
train = np.concatenate((train, indices[:size]))
test = np.concatenate(( test, indices[size:]))
if self.shuffle:
self.rng.shuffle(train)
self.rng.shuffle(test)
return train, test
def make_k_folds(self, y, n_folds=3, stratify=False):
"""
Split data into folds of (approximately) equal size.
Parameters
----------
y : (n_samples,) array-like
The target variable for supervised learning problems.
Stratification is done based upon the `y` labels.
n_folds : int, `n_folds` > 1, optional
Number of folds.
stratify : bool, optional
If True, the folds are made by preserving the percentage of samples
for each class. Stratification is done based upon the `y` labels.
Yields
------
fold : np.ndarray
Indices for current fold.
"""
self.rng.reseed()
n = len(y)
if not stratify:
indices = self.rng.permutation(n) if self.shuffle else np.arange(n, dtype=np.int)
for fold in np.array_split(indices, n_folds):
yield fold
return
# group indices
labels_indices = {}
for index, label in enumerate(y):
if isinstance(label, np.ndarray):
label = tuple(label.tolist())
if not label in labels_indices:
labels_indices[label] = []
labels_indices[label].append(index)
# split all indices label-wise
for label, indices in sorted(labels_indices.items()):
labels_indices[label] = np.array_split(indices, n_folds)
# collect respective splits into folds and shuffle if needed
for k in xrange(n_folds):
fold = np.concatenate([indices[k] for _, indices in sorted(labels_indices.items())])
if self.shuffle:
self.rng.shuffle(fold)
yield fold
def k_fold_split(self, y, n_splits=3, stratify=False):
"""
Split data into train and test subsets for K-fold CV.
Parameters
----------
y : (n_samples,) array-like
The target variable for supervised learning problems.
Stratification is done based upon the `y` labels.
n_splits : int, `n_splits` > 1, optional
Number of folds.
stratify : bool, optional
If True, the folds are made by preserving the percentage of samples
for each class. Stratification is done based upon the `y` labels.
Yields
------
train : (n_train,) np.ndarray
The training set indices for current split.
test : (n_samples - n_train,) np.ndarray
The testing set indices for current split.
"""
folds = list(self.make_k_folds(y, n_folds=n_splits, stratify=stratify))
for i in xrange(n_splits):
yield np.concatenate(folds[:i] + folds[(i + 1):]), folds[i]
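def _train_test_splitter_sketch():
    # Minimal usage sketch based on the doctest above: one stratified
    # train/test split plus K-fold CV index pairs from the same splitter.
    y = np.array([1, 1, 2, 2, 3, 3, 3])
    tts = TrainTestSplitter(shuffle=True, random_seed=1337)
    train, test = tts.split(y, train_ratio=0.5, stratify=True)
    cv_splits = list(tts.k_fold_split(y, n_splits=3, stratify=True))
    return train, test, cv_splits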
class GridSearchCV(object):
"""Exhaustive search over specified parameter values for a `model`.
Parameters
----------
model : model object
This is assumed to implement the ml_mnist.BaseEstimator interface.
param_grid : dict[str] = iterable, or iterable of such
Dictionary with parameters possible values or an iterable (e.g. list)
of such dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings. The remaining parameters keep their default values.
param_order : {None, [str]} or iterable of such, optional
List of parameter names specifying the order in which parameters in a grid
are explored. The smaller the index of a parameter in this list, the more
outer the for-loop it participates in. If multiple grids are specified,
multiple lists (or None) are expected. If None, parameters are explored in
sorted order (for each grid separately, if multiple of them are provided).
train_test_splitter_params : kwargs, optional
Params passed to `TrainTestSplitter`.
n_splits : int, optional
Number of folds passed to `TrainTestSplitter.k_fold_split`.
scoring : callable, optional
Scoring method to use (the higher value the better).
refit : bool, optional
If False, refit the model only for each new combination of X, y (and not
for new combinations of parameters). This may be a reasonable choice for
non-parametric models such as KNNClassifier.
save_models : bool, optional
If True, save new best models to `dirpath`.
dirpath : str, optional
Where to save models if `save_models` set to True.
save_params : kwargs, optional
Additional params that are passed to `model.save`
verbose : bool, optional
If True, print the results of each iteration.
Attributes
----------
cv_results_ : dict[str] = np.ndarray | np.ma.ndarray
Can be imported into a pandas.DataFrame
best_model_ : model object
Model that was chosen by the search, i.e. which gave highest score.
best_score_ : float
Score of `best_model_` on the left out data.
best_std_ : float
Standard deviation that corresponds to the highest score.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the `cv_results_` arrays) which corresponds to the best
candidate parameter setting.
Examples
--------
>>> from knn import KNNClassifier
>>> param_grid = ({'weights': ['uniform', 'distance'],
... 'p': [1., 2., np.inf],
... 'k': [2, 3, 4]},
... {'kernel': ['rbf', 'poly'],
... 'k': [2, 3],
... 'gamma': [0.01, 0.1]})
>>> param_order = (['weights', 'p'],
... None)
>>> grid_cv = GridSearchCV(model=KNNClassifier(), param_grid=param_grid, param_order=param_order,
... save_models=False, verbose=False)
>>> for params in grid_cv.gen_params():
... print params # note the order
{'p': 1.0, 'k': 2, 'weights': 'uniform'}
{'p': 1.0, 'k': 3, 'weights': 'uniform'}
{'p': 1.0, 'k': 4, 'weights': 'uniform'}
{'p': 2.0, 'k': 2, 'weights': 'uniform'}
{'p': 2.0, 'k': 3, 'weights': 'uniform'}
{'p': 2.0, 'k': 4, 'weights': 'uniform'}
{'p': inf, 'k': 2, 'weights': 'uniform'}
{'p': inf, 'k': 3, 'weights': 'uniform'}
{'p': inf, 'k': 4, 'weights': 'uniform'}
{'p': 1.0, 'k': 2, 'weights': 'distance'}
{'p': 1.0, 'k': 3, 'weights': 'distance'}
{'p': 1.0, 'k': 4, 'weights': 'distance'}
{'p': 2.0, 'k': 2, 'weights': 'distance'}
{'p': 2.0, 'k': 3, 'weights': 'distance'}
{'p': 2.0, 'k': 4, 'weights': 'distance'}
{'p': inf, 'k': 2, 'weights': 'distance'}
{'p': inf, 'k': 3, 'weights': 'distance'}
{'p': inf, 'k': 4, 'weights': 'distance'}
{'kernel': 'rbf', 'k': 2, 'gamma': 0.01}
{'kernel': 'poly', 'k': 2, 'gamma': 0.01}
{'kernel': 'rbf', 'k': 3, 'gamma': 0.01}
{'kernel': 'poly', 'k': 3, 'gamma': 0.01}
{'kernel': 'rbf', 'k': 2, 'gamma': 0.1}
{'kernel': 'poly', 'k': 2, 'gamma': 0.1}
{'kernel': 'rbf', 'k': 3, 'gamma': 0.1}
{'kernel': 'poly', 'k': 3, 'gamma': 0.1}
>>> grid_cv.number_of_combinations()
26
>>> grid_cv.unique_params()
['gamma', 'k', 'kernel', 'p', 'weights']
>>> X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.],
... [0.9, 0.99], [0.1, 0.25], [0.8, 0.2], [0.45, 0.55]]
>>> y = [0, 1, 1, 0, 0, 0, 1, 1]
>>> param_grid = ({'weights': ['uniform', 'distance'], 'k': [2, 3]}, {'p': [1., np.inf], 'k': [2]})
>>> grid_cv = GridSearchCV(model=KNNClassifier(algorithm='kd_tree'), param_grid=param_grid, n_splits=2,
... refit=False, save_models=False, verbose=False)
>>> grid_cv.fit(X, y) # doctest: +ELLIPSIS
<...model_selection.GridSearchCV object at 0x...>
>>> grid_cv.best_index_
1
>>> grid_cv.best_score_
1.0
>>> grid_cv.best_std_
0.0
>>> grid_cv.best_params_
{'k': 2, 'weights': 'distance'}
>>> grid_cv.best_model_ # doctest: +ELLIPSIS
KNNClassifier(algorithm='kd_tree', k=2,
kd_tree_=<scipy.spatial.ckdtree.cKDTree object at 0x...>,
kernel=None, kernel_params={}, leaf_size=30, metric=None, p=inf,
weights='uniform')
>>> for k, v in sorted(grid_cv.cv_results_.items()):
... print k, ":", v # doctest: +ELLIPSIS
mean_score : [ 0.625 1. 0.125 1. 0.5 0.625]
param_k : [ 2. 2. 3. 3. 2. 2.]
param_p : [-- -- -- -- 1.0 inf]
param_weights : ['uniform' 'distance' 'uniform' 'distance' -- --]
params : [{'k': 2, 'weights': 'uniform'} {'k': 2, 'weights': 'distance'}
{'k': 3, 'weights': 'uniform'} {'k': 3, 'weights': 'distance'}
{'p': 1.0, 'k': 2} {'p': inf, 'k': 2}]
split0_score : [ 0.75 1. 0.25 1. 0.5 0.75]
split0_test_time : [ 0.0... 0.0... 0.0... 0.0... 0.0... 0.0...]
split0_train_time : [...]
split1_score : [ 0.5 1. 0. 1. 0.5 0.5]
split1_test_time : [...]
split1_train_time : [...]
std_score : [ 0.125 0. 0.125 0. 0. 0.125]
"""
def __init__(self, model=None, param_grid={}, param_order=None, train_test_splitter_params={},
n_splits=3, scoring=accuracy_score, refit=True, save_models=False, dirpath='.', save_params={},
verbose=True):
self.model = model
self.param_grid = param_grid
if isinstance(self.param_grid, dict):
self.param_grid = (self.param_grid,)
self.param_order = param_order
if not self.param_order:
self.param_order = [self.param_order] * len(self.param_grid)
self.train_test_splitter_params = train_test_splitter_params
self.n_splits = n_splits
self.scoring = scoring
self.refit = refit
self.save_models = save_models
self.dirpath = dirpath
self.save_params = save_params
self.verbose = verbose
self.cv_results_ = {}
self.best_model_ = self.model
self.best_score_ = -np.inf
self.best_std_ = None
self.best_params_ = None
self.best_index_ = None
def unique_params(self):
unique = set()
for grid in self.param_grid:
unique |= set(grid.keys())
return list(sorted(unique))
def gen_params(self):
"""Generate all possible combinations of params.
Yields
------
params : dict
Current parameters for model.
"""
# convert to zip-lists and use itertools' magic
for i, grid in enumerate(self.param_grid):
zip_lists = []
order = self.param_order[i]
for param_name in sorted(grid, key=lambda x: order.index(x) if (order and x in order) else x):
param_values = grid[param_name]
zip_lists.append([(param_name, v) for v in param_values])
for combination in product(*zip_lists):
yield dict(combination)
def number_of_combinations(self):
return sum(1 for _ in self.gen_params())
def _check_X_y(self, X, y):
if not isinstance(X, np.ndarray):
X = np.asarray(X)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
return X, y
def _best_model_name(self):
name = self.best_model_.model_name()
name += "__acc_"
name += "{0:.5f}".format(self.best_score_).replace('.', '_')
for k, v in sorted(self.best_params_.items()):
name += "__"
name += str(k)
name += "_"
name += str(v)
name += ".json"
return name
def fit(self, X, y):
timer = Stopwatch(verbose=False).start()
X, y = self._check_X_y(X, y)
unique_params = self.unique_params()
tts = TrainTestSplitter(**self.train_test_splitter_params)
number_of_combinations = self.number_of_combinations()
total_iter = self.n_splits * number_of_combinations
current_iter_width = len(str(total_iter))
if self.verbose:
print "Training {0} on {1} samples x {2} features.".format(self.model.model_name(), *X.shape)
print "{0}-fold CV for each of {1} params combinations == {2} fits ...\n"\
.format(self.n_splits, number_of_combinations, total_iter)
# initialize `cv_results_`
self.cv_results_['mean_score'] = []
self.cv_results_['std_score'] = []
self.cv_results_['params'] = []
for k in xrange(self.n_splits):
self.cv_results_['split{0}_score'.format(k)] = []
self.cv_results_['split{0}_train_time'.format(k)] = []
self.cv_results_['split{0}_test_time'.format(k)] = []
for param_name in unique_params:
self.cv_results_['param_{0}'.format(param_name)] = ma.array([])
current_iter = 0
if self.refit:
# for each param combination, fit consecutively on each fold
# to obtain mean score across splits as soon as possible
for params_index, params in enumerate(self.gen_params()):
# set params and add to `cv_results_`
self.model.reset_params().set_params(**params)
self.cv_results_['params'].append(params)
for param_name in unique_params:
cv_key = 'param_{0}'.format(param_name)
mask = [int(not param_name in params)]
to_concat = ma.array([params.get(param_name, None)], mask=mask)
self.cv_results_[cv_key] = ma.concatenate((self.cv_results_[cv_key],
to_concat))
splits_scores = []
for split_index, (train, test) in enumerate(tts.k_fold_split(y, n_splits=self.n_splits,
stratify=True)):
# verbosing
if self.verbose:
current_iter += 1
t = "iter: {0:{1}}/{2} ".format(current_iter, current_iter_width, total_iter)
t += '+' * (split_index + 1) + '-' * (self.n_splits - split_index - 1)
print_inline(t)
# fit and evaluate
with Stopwatch(verbose=False) as s:
self.model.fit(X[train], y[train])
self.cv_results_['split{0}_train_time'.format(split_index)].append(s.elapsed())
with Stopwatch(verbose=False) as s:
score = self.model.evaluate(X[test], y[test])
self.cv_results_['split{0}_test_time'.format(split_index)].append(s.elapsed())
# score = self.scoring(y[test], y_pred)
splits_scores.append(score)
# add score to `cv_results_`
self.cv_results_['split{0}_score'.format(split_index)].append(score)
# verbosing
if self.verbose:
print_inline(" elapsed: {0} sec".format(
width_format(timer.elapsed(), default_width=7)))
if split_index < self.n_splits - 1:
t = ""
if self.best_score_ > -np.inf:
t += " - best acc.: {0:.4f} at {1}" \
.format(self.best_score_, self.best_params_)
else:
t += " ..."
print t
# compute mean and std score
mean_score = np.mean(splits_scores)
std_score = np.std(splits_scores)
self.cv_results_['mean_score'].append(mean_score)
self.cv_results_['std_score'].append(std_score)
# update 'best' attributes
if mean_score > self.best_score_:
self.best_index_ = params_index
self.best_score_ = mean_score
self.best_std_ = std_score
self.best_params_ = params
self.best_model_ = self.model
if self.save_models:
self.best_model_.save(filepath=os.path.join(self.dirpath, self._best_model_name()),
**self.save_params)
# verbosing
if self.verbose:
print_inline(" - mean acc.: {0:.4f} +/- 2 * {1:.3f}\n"
.format(mean_score, std_score))
else: # if self.refit == False
# fit for each fold and then evaluate on each combination
# of params
for split_index, (train, test) in enumerate(tts.k_fold_split(y, n_splits=self.n_splits,
stratify=True)):
current_best_score = -np.inf
current_best_params = None
for params_index, params in enumerate(self.gen_params()):
# set params
self.model.reset_params().set_params(**params)
# fit model (only once per split)
if params_index == 0:
with Stopwatch(verbose=False) as s:
self.model.fit(X[train], y[train])
# on first split add params to `cv_results_`
if split_index == 0:
# store params' values
self.cv_results_['params'].append(params)
for param_name in unique_params:
cv_key = 'param_{0}'.format(param_name)
mask = [int(not param_name in params)]
to_concat = ma.array([params.get(param_name, None)], mask=mask)
self.cv_results_[cv_key] = ma.concatenate((self.cv_results_[cv_key],
to_concat))
# write training time
self.cv_results_['split{0}_train_time'.format(split_index)]\
.append(s.elapsed() if params_index == 0 else 0.)
# evaluate
with Stopwatch(verbose=False) as s:
score = self.model.evaluate(X[test], y[test])
self.cv_results_['split{0}_test_time'.format(split_index)].append(s.elapsed())
# score = self.scoring(y[test], y_pred)
# add score to `cv_results_`
cv_key = 'split{0}_score'.format(split_index)
self.cv_results_[cv_key].append(score)
# update "current" best score and params
current_mean_score = np.mean([self.cv_results_['split{0}_score'.format(k)][params_index]
for k in xrange(split_index + 1)])
if current_mean_score > current_best_score:
current_best_score = current_mean_score
current_best_params = params
# verbosing
if self.verbose:
current_iter += 1
t = "iter: {0:{1}}/{2} ".format(current_iter, current_iter_width, total_iter)
t += '+' * (split_index + 1) + '-' * (self.n_splits - split_index - 1)
t += " elapsed: {0} sec".format(width_format(timer.elapsed(), default_width=7))
if split_index < self.n_splits - 1:
t += " - best acc.: {0:.4f} [{1}/{2} splits] at {3}"\
.format(current_best_score, split_index + 1, self.n_splits, current_best_params)
print_inline(t)
if split_index < self.n_splits - 1: print
# after last split ...
if split_index == self.n_splits - 1:
# ... compute means, stds
splits_scores = [self.cv_results_['split{0}_score'.format(k)][params_index]
for k in xrange(self.n_splits)]
mean_score = np.mean(splits_scores)
std_score = np.std(splits_scores)
self.cv_results_['mean_score'].append(mean_score)
self.cv_results_['std_score'].append(std_score)
# ... and update best attributes
if mean_score > self.best_score_:
self.best_index_ = params_index
self.best_score_ = mean_score
self.best_std_ = std_score
self.best_params_ = params
self.best_model_ = self.model
if self.save_models:
self.best_model_.save(filepath=os.path.join(self.dirpath, self._best_model_name()),
**self.save_params)
# verbosing
if self.verbose:
print_inline(" - best acc.: {0:.4f} +/- 2 * {1:.3f} at {2}\n"
.format(self.best_score_, self.best_std_, self.best_params_))
# convert lists to np.ndarray
for key in (['mean_score', 'std_score', 'params'] +
['split{0}_{1}'.format(k, s) for k in xrange(self.n_splits)
for s in ('score', 'train_time', 'test_time')]):
self.cv_results_[key] = np.asarray(self.cv_results_[key])
return self
def to_df(self):
import pandas as pd
return pd.DataFrame.from_dict(self.cv_results_).fillna('')
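def _grid_search_sketch():
    # Minimal usage sketch, assuming the KNNClassifier from the doctest above
    # is importable; shows the intended fit / best_params_ / to_df() workflow.
    from knn import KNNClassifier
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.],
         [0.9, 0.99], [0.1, 0.25], [0.8, 0.2], [0.45, 0.55]]
    y = [0, 1, 1, 0, 0, 0, 1, 1]
    grid_cv = GridSearchCV(model=KNNClassifier(), param_grid={'k': [2, 3]},
                           n_splits=2, save_models=False, verbose=False)
    grid_cv.fit(X, y)
    return grid_cv.best_params_, grid_cv.to_df()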
if __name__ == '__main__':
# run corresponding tests
import tests.test_model_selection as t
from utils.testing import run_tests
run_tests(__file__, t)
|
|
'''
Created on Aug 16, 2016
@author: grastogi
'''
import unittest
from ansible.module_utils.network.avi.ansible_utils import \
cleanup_absent_fields, avi_obj_cmp
class TestAviApiUtils(unittest.TestCase):
def test_avi_obj_cmp(self):
obj = {'name': 'testpool'}
existing_obj = {
'lb_algorithm': 'LB_ALGORITHM_LEAST_CONNECTIONS',
'use_service_port': False,
'server_auto_scale': False,
'host_check_enabled': False,
'enabled': True,
'capacity_estimation': False,
'fewest_tasks_feedback_delay': 10,
'_last_modified': '1471377748747040',
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-afe8bf2c-9821-4272-9bc6-67634c84bec9',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-0e8ce760-fed2-4650-9397-5b3e4966376e',
'inline_health_monitor': True,
'default_server_port': 80,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'server_count': 0,
'sni_enabled': True,
'request_queue_enabled': False,
'name': 'testpool',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-20084ee1-872e-4103-98e1-899103e2242a',
'tenant_ref': 'https://192.0.2.42/api/tenant/admin',
'uuid': 'pool-20084ee1-872e-4103-98e1-899103e2242a',
'connection_ramp_duration': 10}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def test_avi_obj_cmp_w_refs(self):
obj = {'name': 'testpool',
'health_monitor_refs': ['/api/healthmonitor?name=System-HTTP'],
'enabled': True}
existing_obj = {
'lb_algorithm': 'LB_ALGORITHM_LEAST_CONNECTIONS',
'use_service_port': False,
'server_auto_scale': False,
'host_check_enabled': False,
'enabled': True,
'capacity_estimation': False,
'fewest_tasks_feedback_delay': 10,
'_last_modified': '1471377748747040',
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-afe8bf2c-9821-4272-9bc6-67634c84bec9',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-0e8ce760-fed2-4650-9397-5b3e4966376e',
'inline_health_monitor': True,
'default_server_port': 80,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'server_count': 0,
'sni_enabled': True,
'request_queue_enabled': False,
'name': 'testpool',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-20084ee1-872e-4103-98e1-899103e2242a',
'tenant_ref': 'https://192.0.2.42/api/tenant/admin',
'uuid': 'pool-20084ee1-872e-4103-98e1-899103e2242a',
'connection_ramp_duration': 10,
'health_monitor_refs': [
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9#System-HTTP"],
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
obj = {'name': 'testpool',
'health_monitor_refs': ['/api/healthmonitor?name=System-HTTP'],
'server_count': 1}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
obj = {'name': 'testpool',
'health_monitor_refs': ['api/healthmonitor?name=System-HTTP'],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
obj = {'name': 'testpool',
'health_monitor_refs': ['healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9'],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
obj = {'name': 'testpool#asdfasf',
'health_monitor_refs': ['api/healthmonitor?name=System-HTTP'],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
obj = {'name': 'testpool',
'health_monitor_refs': ['/api/healthmonitor?name=System-HTTP#'],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
def test_avi_obj_cmp_empty_list(self):
obj = {'name': 'testpool',
'health_monitor_refs': [],
'enabled': True}
existing_obj = {
'lb_algorithm': 'LB_ALGORITHM_LEAST_CONNECTIONS',
'use_service_port': False,
'server_auto_scale': False,
'host_check_enabled': False,
'enabled': True,
'capacity_estimation': False,
'fewest_tasks_feedback_delay': 10,
'_last_modified': '1471377748747040',
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-afe8bf2c-9821-4272-9bc6-67634c84bec9',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-0e8ce760-fed2-4650-9397-5b3e4966376e',
'inline_health_monitor': True,
'default_server_port': 80,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'server_count': 0,
'sni_enabled': True,
'request_queue_enabled': False,
'name': 'testpool',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-20084ee1-872e-4103-98e1-899103e2242a',
'tenant_ref': 'https://192.0.2.42/api/tenant/admin',
'uuid': 'pool-20084ee1-872e-4103-98e1-899103e2242a',
'connection_ramp_duration': 10
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def test_avi_obj_cmp_w_refs_n_name(self):
existing_obj = {
'use_service_port': False,
'server_auto_scale': False,
'host_check_enabled': False,
'enabled': True,
'capacity_estimation': False,
'fewest_tasks_feedback_delay': 10,
'_last_modified': '1471377748747040',
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-afe8bf2c-9821-4272-9bc6-67634c84bec9',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-0e8ce760-fed2-4650-9397-5b3e4966376e',
'inline_health_monitor': True,
'default_server_port': 80,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'server_count': 0,
'sni_enabled': True,
'request_queue_enabled': False,
'name': 'testpool',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-20084ee1-872e-4103-98e1-899103e2242a',
'tenant_ref': 'https://192.0.2.42/api/tenant/admin',
'uuid': 'pool-20084ee1-872e-4103-98e1-899103e2242a',
'connection_ramp_duration': 10,
'health_monitor_refs': [
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9#System-HTTP",
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc8",
],
}
obj = {'name': 'testpool',
'health_monitor_refs': ['https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9',
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc8"],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
obj = {'name': 'testpool',
'health_monitor_refs': [
'https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9#System-HTTP',
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc8"],
'server_count': 0}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
obj = {'name': 'testpool',
'health_monitor_refs': [
'https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9#System-HTTP',
"https://192.0.2.42/api/healthmonitor/healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc8#System-HTTP2"],
'server_count': 0,
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-afe8bf2c-9821-4272-9bc6-67634c84bec9#Default-Cloud',
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def test_avi_list_update(self):
existing_obj = {
'services': [
{
"enable_ssl": False,
"port_range_end": 80,
"port": 80
},
{
"enable_ssl": False,
"port_range_end": 443,
"port": 443
}
],
"name": "vs-health-test",
"url": "https://192.0.2.42/api/virtualservice/virtualservice-526c55c2-df89-40b9-9de6-e45a472290aa",
}
obj = {
'services': [
{
"enable_ssl": False,
"port_range_end": 80,
"port": 80
}
]
}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
obj = {
'services': [
{
"enable_ssl": False,
"port_range_end": 80,
"port": 80
},
{
"enable_ssl": False,
"port_range_end": 443,
"port": 80
}
],
"name": "vs-health-test",
"url": "https://192.0.2.42/api/virtualservice/virtualservice-526c55c2-df89-40b9-9de6-e45a472290aa",
}
diff = avi_obj_cmp(obj, existing_obj)
assert not diff
def test_cleanup_abset(self):
obj = {'x': 10,
'y': {'state': 'absent'},
'z': {'a': {'state': 'absent'}},
'l': [{'y1': {'state': 'absent'}}],
'z1': {'a': {'state': 'absent'}, 'b': {}, 'c': 42},
'empty': []}
obj = cleanup_absent_fields(obj)
assert 'y' not in obj
assert 'z' not in obj
assert 'l' not in obj
assert 'z1' in obj
assert 'b' not in obj['z1']
assert 'a' not in obj['z1']
assert 'empty' not in obj
def test_complex_obj(self):
obj = {
'lb_algorithm': 'LB_ALGORITHM_ROUND_ROBIN',
'use_service_port': False, 'server_auto_scale': False,
'host_check_enabled': False,
'tenant_ref': 'https://192.0.2.42/api/tenant/admin#admin',
'capacity_estimation': False,
'servers': [{
'hostname': 'grastogi-server6', 'ratio': 1,
'ip': {'type': 'V4', 'addr': '198.51.100.62'},
'discovered_networks': [{
'subnet': [{
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
},
'mask': 24
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10#PG-964'
}],
'enabled': True, 'nw_ref': 'https://192.0.2.42/api/vimgrnwruntime/dvportgroup-53975-10.10.2.10#PG-964',
'verify_network': False,
'static': False,
'resolve_server_by_dns': False,
'external_uuid': 'vm-4230615e-bc0b-3d33-3929-1c7328575993',
'vm_ref': 'https://192.0.2.42/api/vimgrvmruntime/vm-4230615e-bc0b-3d33-3929-1c7328575993#grastogi-server6'
}, {
'hostname': 'grastogi-server6',
'ratio': 1,
'ip': {
'type': 'V4',
'addr': '198.51.100.61'
},
'discovered_networks': [{
'subnet': [{
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
},
'mask': 24
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10#PG-964'
}],
'enabled': True,
'nw_ref': 'https://192.0.2.42/api/vimgrnwruntime/dvportgroup-53975-10.10.2.10#PG-964',
'verify_network': False,
'static': False,
'resolve_server_by_dns': False,
'external_uuid': 'vm-4230615e-bc0b-3d33-3929-1c7328575993',
'vm_ref': 'https://192.0.2.42/api/vimgrvmruntime/vm-4230615e-bc0b-3d33-3929-1c7328575993#grastogi-server6'
}, {
'hostname': 'grastogi-server6',
'ratio': 1,
'ip': {
'type': 'V4',
'addr': '198.51.100.65'
},
'discovered_networks': [{
'subnet': [{
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
}, 'mask': 24
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10#PG-964'
}],
'enabled': True,
'verify_network': False,
'static': False,
'resolve_server_by_dns': False
}],
'fewest_tasks_feedback_delay': 10,
'_last_modified': '1473292763246107',
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-e0696a58-8b72-4026-923c-9a87c38a2489#Default-Cloud',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-33dfbcd7-867c-4e3e-acf7-96bf679d5a0d#global',
'inline_health_monitor': True,
'default_server_port': 8000,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'sni_enabled': True,
'server_count': 3,
'uuid': 'pool-09201181-747e-41ea-872d-e9a7df71b726',
'request_queue_enabled': False,
'name': 'p1',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-09201181-747e-41ea-872d-e9a7df71b726#p1',
'enabled': True,
'connection_ramp_duration': 10}
existing_obj = {
'lb_algorithm': 'LB_ALGORITHM_ROUND_ROBIN',
'use_service_port': False,
'server_auto_scale': False,
'host_check_enabled': False,
'tenant_ref': 'https://192.0.2.42/api/tenant/admin',
'capacity_estimation': False,
'servers': [{
'hostname': 'grastogi-server6', 'ratio': 1,
'ip': {
'type': 'V4',
'addr': '198.51.100.62'
},
'discovered_networks': [{
'subnet': [{
'mask': 24,
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
}
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10'
}],
'enabled': True,
'nw_ref': 'https://192.0.2.42/api/vimgrnwruntime/dvportgroup-53975-10.10.2.10',
'verify_network': False,
'static': False,
'resolve_server_by_dns': False,
'external_uuid': 'vm-4230615e-bc0b-3d33-3929-1c7328575993',
'vm_ref': 'https://192.0.2.42/api/vimgrvmruntime/vm-4230615e-bc0b-3d33-3929-1c7328575993'
}, {
'hostname': 'grastogi-server6',
'ratio': 1,
'ip': {
'type': 'V4',
'addr': '198.51.100.61'
},
'discovered_networks': [{
'subnet': [{
'mask': 24,
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
}
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10'
}],
'enabled': True,
'nw_ref': 'https://192.0.2.42/api/vimgrnwruntime/dvportgroup-53975-10.10.2.10',
'verify_network': False,
'static': False,
'resolve_server_by_dns': False,
'external_uuid': 'vm-4230615e-bc0b-3d33-3929-1c7328575993',
'vm_ref': 'https://192.0.2.42/api/vimgrvmruntime/vm-4230615e-bc0b-3d33-3929-1c7328575993'
}, {
'hostname': 'grastogi-server6',
'ratio': 1,
'ip': {
'type': 'V4',
'addr': '198.51.100.65'
},
'discovered_networks': [{
'subnet': [{
'mask': 24,
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
}
}],
'network_ref': 'https://192.0.2.42/api/network/dvportgroup-53975-10.10.2.10'
}],
'enabled': True,
'nw_ref': 'https://192.0.2.42/api/vimgrnwruntime/dvportgroup-53975-10.10.2.10',
'verify_network': False,
'static': False,
'resolve_server_by_dns': False,
'external_uuid': 'vm-4230615e-bc0b-3d33-3929-1c7328575993',
'vm_ref': 'https://192.0.2.42/api/vimgrvmruntime/vm-4230615e-bc0b-3d33-3929-1c7328575993'
}],
'fewest_tasks_feedback_delay': 10,
'cloud_ref': 'https://192.0.2.42/api/cloud/cloud-e0696a58-8b72-4026-923c-9a87c38a2489',
'vrf_ref': 'https://192.0.2.42/api/vrfcontext/vrfcontext-33dfbcd7-867c-4e3e-acf7-96bf679d5a0d',
'inline_health_monitor': True,
'default_server_port': 8000,
'request_queue_depth': 128,
'graceful_disable_timeout': 1,
'sni_enabled': True,
'server_count': 3,
'uuid': 'pool-09201181-747e-41ea-872d-e9a7df71b726',
'request_queue_enabled': False,
'name': 'p1',
'max_concurrent_connections_per_server': 0,
'url': 'https://192.0.2.42/api/pool/pool-09201181-747e-41ea-872d-e9a7df71b726',
'enabled': True,
'connection_ramp_duration': 10
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def testAWSVs(self):
existing_obj = {
'network_profile_ref': 'https://12.97.16.202/api/networkprofile/networkprofile-9a0a9896-6876-44c8-a3ee-512a968905f2#System-TCP-Proxy',
'port_uuid': 'eni-4144e73c',
'weight': 1,
'availability_zone': 'us-west-2a',
'enabled': True,
'flow_dist': 'LOAD_AWARE',
'subnet_uuid': 'subnet-91f0b6f4',
'delay_fairness': False,
'avi_allocated_vip': True,
'vrf_context_ref': 'https://12.97.16.202/api/vrfcontext/vrfcontext-722b280d-b555-4d82-9b35-af9442c0cb86#global',
'subnet': {
'ip_addr': {
'type': 'V4',
'addr': '198.51.100.0'
},
'mask': 24
},
'cloud_type': 'CLOUD_AWS', 'uuid': 'virtualservice-a5f49b99-22c8-42e6-aa65-3ca5f1e36b9e',
'network_ref': 'https://12.97.16.202/api/network/subnet-91f0b6f4',
'cloud_ref': 'https://12.97.16.202/api/cloud/cloud-49829414-c704-43ca-9dff-05b9e8474dcb#AWS Cloud',
'avi_allocated_fip': False,
'se_group_ref': 'https://12.97.16.202/api/serviceenginegroup/serviceenginegroup-3bef6320-5a2d-4801-85c4-ef4f9841f235#Default-Group',
'scaleout_ecmp': False,
'max_cps_per_client': 0,
'type': 'VS_TYPE_NORMAL',
'analytics_profile_ref': 'https://12.97.16.202/api/analyticsprofile/analyticsprofile-70f8b06f-7b6a-4500-b829-c869bbca2009#System-Analytics-Profile',
'use_bridge_ip_as_vip': False,
'application_profile_ref': 'https://12.97.16.202/api/applicationprofile/applicationprofile-103cbc31-cac5-46ab-8e66-bbbb2c8f551f#System-HTTP',
'auto_allocate_floating_ip': False,
'services': [{
'enable_ssl': False,
'port_range_end': 80,
'port': 80
}],
'active_standby_se_tag': 'ACTIVE_STANDBY_SE_1',
'ip_address': {
'type': 'V4',
'addr': '198.51.100.33'
},
'ign_pool_net_reach': False,
'east_west_placement': False,
'limit_doser': False,
'name': 'wwwawssit.ebiz.verizon.com',
'url': 'https://12.97.16.202/api/virtualservice/virtualservice-a5f49b99-22c8-42e6-aa65-3ca5f1e36b9e#wwwawssit.ebiz.verizon.com',
'ssl_sess_cache_avg_size': 1024,
'enable_autogw': True,
'auto_allocate_ip': True,
'tenant_ref': 'https://12.97.16.202/api/tenant/tenant-f52f7a3e-6876-4bb9-b8f7-3cab636dadf2#Sales',
'remove_listening_port_on_vs_down': False
}
obj = {'auto_allocate_ip': True, 'subnet_uuid': 'subnet-91f0b6f4', 'cloud_ref': '/api/cloud?name=AWS Cloud', 'services': [{'port': 80}],
'name': 'wwwawssit.ebiz.verizon.com'}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def testhttppolicy(self):
existing_obj = {
"http_request_policy": {
"rules": [{
"enable": True,
"index": 0,
"match": {
"path": {
"match_case": "INSENSITIVE",
"match_criteria": "CONTAINS",
"match_str": ["xvz", "rst"]
}
},
"name": "blah",
"switching_action": {
"action": "HTTP_SWITCHING_SELECT_POOL",
"pool_ref": "https://12.97.16.202/api/pool/pool-d7f6f5e7-bd26-49ad-aeed-965719eb140b#abc",
"status_code": "HTTP_LOCAL_RESPONSE_STATUS_CODE_200"
}
}]
},
"is_internal_policy": False,
"name": "blah",
"tenant_ref": "https://12.97.16.202/api/tenant/tenant-f52f7a3e-6876-4bb9-b8f7-3cab636dadf2#Sales",
"url": "https://12.97.16.202/api/httppolicyset/httppolicyset-ffd8354b-671b-48d5-92cc-69a9057aad0c#blah",
"uuid": "httppolicyset-ffd8354b-671b-48d5-92cc-69a9057aad0c"
}
obj = {
"http_request_policy": {
"rules": [{
"enable": True,
"index": "0",
"match": {
"path": {
"match_case": "INSENSITIVE",
"match_criteria": "CONTAINS",
"match_str": ["xvz", "rst"]
}
},
"name": "blah",
"switching_action": {
"action": "HTTP_SWITCHING_SELECT_POOL",
"pool_ref": "/api/pool?name=abc",
"status_code": "HTTP_LOCAL_RESPONSE_STATUS_CODE_200"
}
}]
},
"is_internal_policy": False,
"tenant": "Sales"
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def testCleanupFields(self):
obj = {'name': 'testpool',
'scalar_field': {'state': 'absent'},
'list_fields': [{'x': '1'}, {'y': {'state': 'absent'}}]}
cleanup_absent_fields(obj)
assert 'scalar_field' not in obj
for elem in obj['list_fields']:
assert 'y' not in elem
def testGSLB(self):
obj = {
'domain_names': ['cloud5.avi.com', 'cloud6.avi.com'],
'health_monitor_scope': 'GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS',
'groups': [{
'priority': 20,
'members': [{
'ip': {
'type': 'V4',
'addr': '198.51.100.1'
},
'enabled': True, 'ratio': 1
}, {
'ip': {
'type': 'V4',
'addr': '198.51.100.10'
},
'enabled': True,
'ratio': 1
}],
'algorithm': 'GSLB_ALGORITHM_CONSISTENT_HASH',
'name': 'sc'
}, {
'priority': 14,
'members': [{
'ip': {
'type': 'V4',
'addr': '198.51.100.2'
},
'enabled': True,
'ratio': 1
}],
'algorithm': 'GSLB_ALGORITHM_ROUND_ROBIN',
'name': 'cn'
}, {
'priority': 15,
'members': [{
'ip': {
'type': 'V4',
'addr': '198.51.100.3'
},
'enabled': True, 'ratio': 1
}],
'algorithm': 'GSLB_ALGORITHM_ROUND_ROBIN',
'name': 'in'
}],
'name': 'gs-3',
'num_dns_ip': 2
}
existing_obj = {
u'controller_health_status_enabled': True,
u'uuid': u'gslbservice-ab9b36bd-3e95-4c2e-80f8-92905c2eccb2',
u'wildcard_match': False,
u'url': u'https://192.0.2.42/api/gslbservice/gslbservice-ab9b36bd-3e95-4c2e-80f8-92905c2eccb2#gs-3',
u'tenant_ref': u'https://192.0.2.42/api/tenant/admin#admin',
u'enabled': True,
u'domain_names': [u'cloud5.avi.com', u'cloud6.avi.com'],
u'use_edns_client_subnet': True,
u'groups': [{
u'priority': 20,
u'members': [{
u'ip': {
u'type': u'V4',
u'addr': u'198.51.100.1'
},
u'ratio': 1,
u'enabled': True
}, {
u'ip': {
u'type': u'V4',
u'addr': u'198.51.100.10'
},
u'ratio': 1,
u'enabled': True
}],
u'name': u'sc',
u'algorithm': u'GSLB_ALGORITHM_CONSISTENT_HASH'
}, {
u'priority': 14,
u'members': [{
u'ip': {
u'type': u'V4',
u'addr': u'198.51.100.2'
},
u'ratio': 1,
u'enabled': True
}],
u'name': u'cn',
u'algorithm': u'GSLB_ALGORITHM_ROUND_ROBIN'
}, {
u'priority': 15,
u'members': [{
u'ip': {
u'type': u'V4',
u'addr': u'198.51.100.3'
},
u'ratio': 1,
u'enabled': True
}],
u'name': u'in',
u'algorithm': u'GSLB_ALGORITHM_ROUND_ROBIN'
}],
u'num_dns_ip': 2,
u'health_monitor_scope': u'GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS',
u'name': u'gs-3'
}
diff = avi_obj_cmp(obj, existing_obj)
assert diff
def testNoneParams(self):
objwnone = {
'name': 'testpool',
'scalar_field': None,
'list_fields': {
'y': None,
'z': 'zz'
}
}
obj = {
'name': 'testpool',
'list_fields': {
'z': 'zz'
}
}
result = avi_obj_cmp(objwnone, obj)
assert result
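def _ref_matching_sketch():
    # Illustrative sketch, not part of the original test suite.  Judging by
    # the assertions above, avi_obj_cmp(obj, existing_obj) returns a truthy
    # value when the desired obj is already satisfied by existing_obj, and it
    # matches references by object name: '/api/healthmonitor?name=System-HTTP'
    # is treated as equivalent to the full
    # 'https://.../healthmonitor-<uuid>#System-HTTP' URL, with trailing
    # '#<name>' fragments ignored during comparison.
    obj = {'name': 'testpool',
           'health_monitor_refs': ['/api/healthmonitor?name=System-HTTP']}
    existing_obj = {
        'name': 'testpool',
        'health_monitor_refs': [
            'https://192.0.2.42/api/healthmonitor/'
            'healthmonitor-6d07b57f-126b-476c-baba-a8c8c8b06dc9#System-HTTP'],
    }
    return avi_obj_cmp(obj, existing_obj)  # expected truthy, mirroring the tests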
|
|
## statprof.py
## Copyright (C) 2012 Bryan O'Sullivan <[email protected]>
## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, contact:
##
## Free Software Foundation Voice: +1-617-542-5942
## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
## Boston, MA 02111-1307, USA [email protected]
"""
statprof is intended to be a fairly simple statistical profiler for
python. It was ported directly from a statistical profiler for guile,
also named statprof, available from guile-lib [0].
[0] http://wingolog.org/software/guile-lib/statprof/
To start profiling, call statprof.start():
>>> start()
Then run whatever it is that you want to profile, for example:
>>> import test.pystone; test.pystone.pystones()
Then stop the profiling and print out the results:
>>> stop()
>>> display()
% cumulative self
time seconds seconds name
26.72 1.40 0.37 pystone.py:79:Proc0
13.79 0.56 0.19 pystone.py:133:Proc1
13.79 0.19 0.19 pystone.py:208:Proc8
10.34 0.16 0.14 pystone.py:229:Func2
6.90 0.10 0.10 pystone.py:45:__init__
4.31 0.16 0.06 pystone.py:53:copy
...
All of the numerical data is statistically approximate. In the
following column descriptions, and in all of statprof, "time" refers
to execution time (both user and system), not wall clock time.
% time
The percent of the time spent inside the procedure itself (not
counting children).
cumulative seconds
The total number of seconds spent in the procedure, including
children.
self seconds
The total number of seconds spent in the procedure itself (not
counting children).
name
The name of the procedure.
By default statprof keeps the data collected from previous runs. If you
want to clear the collected data, call reset():
>>> reset()
reset() can also be used to change the sampling frequency from the
default of 1000 Hz. For example, to tell statprof to sample 50 times a
second:
>>> reset(50)
This means that statprof will sample the call stack after every 1/50 of
a second of user + system time spent running on behalf of the python
process. When your process is idle (for example, blocking in a read(),
as is the case at the listener), the clock does not advance. For this
reason statprof is currently not suitable for profiling io-bound
operations.
The profiler uses the hash of the code object itself to identify the
procedures, so it won't confuse different procedures with the same name.
They will show up as two different rows in the output.
Right now the profiler is quite simplistic. I cannot provide
call-graphs or other higher level information. What you see in the
table is pretty much all there is. Patches are welcome :-)
Threading
---------
Because signals only get delivered to the main thread in Python,
statprof only profiles the main thread. However because the time
reporting function uses per-process timers, the results can be
significantly off if other threads' work patterns are not similar to the
main thread's work patterns.
"""
# no-check-code
from __future__ import absolute_import, division, print_function
import collections
import contextlib
import getopt
import inspect
import json
import os
import signal
import sys
import threading
import time
from .pycompat import open
from . import (
encoding,
pycompat,
)
defaultdict = collections.defaultdict
contextmanager = contextlib.contextmanager
__all__ = [b'start', b'stop', b'reset', b'display', b'profile']
skips = {
"util.py:check",
"extensions.py:closure",
"color.py:colorcmd",
"dispatch.py:checkargs",
"dispatch.py:<lambda>",
"dispatch.py:_runcatch",
"dispatch.py:_dispatch",
"dispatch.py:_runcommand",
"pager.py:pagecmd",
"dispatch.py:run",
"dispatch.py:dispatch",
"dispatch.py:runcommand",
"hg.py:<module>",
"evolve.py:warnobserrors",
}
###########################################################################
## Utils
def clock():
times = os.times()
return (times[0] + times[1], times[4])
###########################################################################
## Collection data structures
class ProfileState(object):
def __init__(self, frequency=None):
self.reset(frequency)
self.track = b'cpu'
def reset(self, frequency=None):
# total so far
self.accumulated_time = (0.0, 0.0)
# start_time when timer is active
self.last_start_time = None
# a float
if frequency:
self.sample_interval = 1.0 / frequency
elif not pycompat.hasattr(self, 'sample_interval'):
# default to 1000 Hz
self.sample_interval = 1.0 / 1000.0
else:
# leave the frequency as it was
pass
self.remaining_prof_time = None
# for user start/stop nesting
self.profile_level = 0
self.samples = []
def accumulate_time(self, stop_time):
increment = (
stop_time[0] - self.last_start_time[0],
stop_time[1] - self.last_start_time[1],
)
self.accumulated_time = (
self.accumulated_time[0] + increment[0],
self.accumulated_time[1] + increment[1],
)
def seconds_per_sample(self):
return self.accumulated_time[self.timeidx] / len(self.samples)
@property
def timeidx(self):
if self.track == b'real':
return 1
return 0
state = ProfileState()
class CodeSite(object):
cache = {}
__slots__ = ('path', 'lineno', 'function', 'source')
def __init__(self, path, lineno, function):
assert isinstance(path, bytes)
self.path = path
self.lineno = lineno
assert isinstance(function, bytes)
self.function = function
self.source = None
def __eq__(self, other):
try:
return self.lineno == other.lineno and self.path == other.path
except:
return False
def __hash__(self):
return hash((self.lineno, self.path))
@classmethod
def get(cls, path, lineno, function):
k = (path, lineno)
try:
return cls.cache[k]
except KeyError:
v = cls(path, lineno, function)
cls.cache[k] = v
return v
def getsource(self, length):
if self.source is None:
lineno = self.lineno - 1
try:
with open(self.path, b'rb') as fp:
for i, line in enumerate(fp):
if i == lineno:
self.source = line.strip()
break
except:
pass
if self.source is None:
self.source = b''
source = self.source
if len(source) > length:
source = source[: (length - 3)] + b"..."
return source
def filename(self):
return os.path.basename(self.path)
def skipname(self):
return '%s:%s' % (self.filename(), self.function)
class Sample(object):
__slots__ = ('stack', 'time')
def __init__(self, stack, time):
self.stack = stack
self.time = time
@classmethod
def from_frame(cls, frame, time):
stack = []
while frame:
stack.append(
CodeSite.get(
pycompat.sysbytes(frame.f_code.co_filename),
frame.f_lineno,
pycompat.sysbytes(frame.f_code.co_name),
)
)
frame = frame.f_back
return Sample(stack, time)
###########################################################################
## SIGPROF handler
def profile_signal_handler(signum, frame):
if state.profile_level > 0:
now = clock()
state.accumulate_time(now)
timestamp = state.accumulated_time[state.timeidx]
state.samples.append(Sample.from_frame(frame, timestamp))
signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0)
state.last_start_time = now
stopthread = threading.Event()
def samplerthread(tid):
while not stopthread.is_set():
now = clock()
state.accumulate_time(now)
frame = sys._current_frames()[tid]
timestamp = state.accumulated_time[state.timeidx]
state.samples.append(Sample.from_frame(frame, timestamp))
state.last_start_time = now
time.sleep(state.sample_interval)
stopthread.clear()
###########################################################################
## Profiling API
def is_active():
return state.profile_level > 0
lastmechanism = None
def start(mechanism=b'thread', track=b'cpu'):
'''Install the profiling signal handler, and start profiling.'''
state.track = track # note: nesting different mode won't work
state.profile_level += 1
if state.profile_level == 1:
state.last_start_time = clock()
rpt = state.remaining_prof_time
state.remaining_prof_time = None
global lastmechanism
lastmechanism = mechanism
if mechanism == b'signal':
signal.signal(signal.SIGPROF, profile_signal_handler)
signal.setitimer(
signal.ITIMER_PROF, rpt or state.sample_interval, 0.0
)
elif mechanism == b'thread':
frame = inspect.currentframe()
tid = [k for k, f in sys._current_frames().items() if f == frame][0]
state.thread = threading.Thread(
target=samplerthread, args=(tid,), name="samplerthread"
)
state.thread.start()
def stop():
'''Stop profiling, and uninstall the profiling signal handler.'''
state.profile_level -= 1
if state.profile_level == 0:
if lastmechanism == b'signal':
rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
signal.signal(signal.SIGPROF, signal.SIG_IGN)
state.remaining_prof_time = rpt[0]
elif lastmechanism == b'thread':
stopthread.set()
state.thread.join()
state.accumulate_time(clock())
state.last_start_time = None
statprofpath = encoding.environ.get(b'STATPROF_DEST')
if statprofpath:
save_data(statprofpath)
return state
def save_data(path):
with open(path, b'w+') as file:
file.write(b"%f %f\n" % state.accumulated_time)
for sample in state.samples:
time = sample.time
stack = sample.stack
sites = [
b'\1'.join([s.path, b'%d' % s.lineno, s.function])
for s in stack
]
file.write(b"%d\0%s\n" % (time, b'\0'.join(sites)))
def load_data(path):
lines = open(path, b'rb').read().splitlines()
state.accumulated_time = [float(value) for value in lines[0].split()]
state.samples = []
for line in lines[1:]:
parts = line.split(b'\0')
time = float(parts[0])
rawsites = parts[1:]
sites = []
for rawsite in rawsites:
siteparts = rawsite.split(b'\1')
sites.append(
CodeSite.get(siteparts[0], int(siteparts[1]), siteparts[2])
)
state.samples.append(Sample(sites, time))
def reset(frequency=None):
"""Clear out the state of the profiler. Do not call while the
profiler is running.
The optional frequency argument specifies the number of samples to
collect per second."""
assert state.profile_level == 0, b"Can't reset() while statprof is running"
CodeSite.cache.clear()
state.reset(frequency)
@contextmanager
def profile():
start()
try:
yield
finally:
stop()
display()
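# Illustrative usage sketch (comments only, not part of the module; the
# profiled function name is a placeholder): the profile() context manager
# above pairs start()/stop() and prints a report via display() on exit.
#
#     with profile():
#         run_expensive_operation()
#
# The same thing can be driven by hand, picking a report format explicitly:
#
#     start(mechanism=b'thread', track=b'cpu')
#     run_expensive_operation()
#     stop()
#     display(format=DisplayFormats.Hotpath)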
###########################################################################
## Reporting API
class SiteStats(object):
def __init__(self, site):
self.site = site
self.selfcount = 0
self.totalcount = 0
def addself(self):
self.selfcount += 1
def addtotal(self):
self.totalcount += 1
def selfpercent(self):
return self.selfcount / len(state.samples) * 100
def totalpercent(self):
return self.totalcount / len(state.samples) * 100
def selfseconds(self):
return self.selfcount * state.seconds_per_sample()
def totalseconds(self):
return self.totalcount * state.seconds_per_sample()
@classmethod
def buildstats(cls, samples):
stats = {}
for sample in samples:
for i, site in enumerate(sample.stack):
sitestat = stats.get(site)
if not sitestat:
sitestat = SiteStats(site)
stats[site] = sitestat
sitestat.addtotal()
if i == 0:
sitestat.addself()
return [s for s in pycompat.itervalues(stats)]
class DisplayFormats:
ByLine = 0
ByMethod = 1
AboutMethod = 2
Hotpath = 3
FlameGraph = 4
Json = 5
Chrome = 6
def display(fp=None, format=3, data=None, **kwargs):
'''Print statistics, either to stdout or the given file object.'''
if data is None:
data = state
if fp is None:
import sys
fp = sys.stdout
if len(data.samples) == 0:
fp.write(b'No samples recorded.\n')
return
if format == DisplayFormats.ByLine:
display_by_line(data, fp)
elif format == DisplayFormats.ByMethod:
display_by_method(data, fp)
elif format == DisplayFormats.AboutMethod:
display_about_method(data, fp, **kwargs)
elif format == DisplayFormats.Hotpath:
display_hotpath(data, fp, **kwargs)
elif format == DisplayFormats.FlameGraph:
write_to_flame(data, fp, **kwargs)
elif format == DisplayFormats.Json:
write_to_json(data, fp)
elif format == DisplayFormats.Chrome:
write_to_chrome(data, fp, **kwargs)
else:
raise Exception(b"Invalid display format")
if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
fp.write(b'---\n')
fp.write(b'Sample count: %d\n' % len(data.samples))
fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)
def display_by_line(data, fp):
"""Print the profiler data with each sample line represented
as one row in a table. Sorted by self-time per line."""
stats = SiteStats.buildstats(data.samples)
stats.sort(reverse=True, key=lambda x: x.selfseconds())
fp.write(
b'%5.5s %10.10s %7.7s %-8.8s\n'
% (b'% ', b'cumulative', b'self', b'')
)
fp.write(
b'%5.5s %9.9s %8.8s %-8.8s\n'
% (b"time", b"seconds", b"seconds", b"name")
)
for stat in stats:
site = stat.site
sitelabel = b'%s:%d:%s' % (site.filename(), site.lineno, site.function)
fp.write(
b'%6.2f %9.2f %9.2f %s\n'
% (
stat.selfpercent(),
stat.totalseconds(),
stat.selfseconds(),
sitelabel,
)
)
def display_by_method(data, fp):
"""Print the profiler data with each sample function represented
as one row in a table. Important lines within that function are
output as nested rows. Sorted by self-time per line."""
fp.write(
b'%5.5s %10.10s %7.7s %-8.8s\n'
% (b'% ', b'cumulative', b'self', b'')
)
fp.write(
b'%5.5s %9.9s %8.8s %-8.8s\n'
% (b"time", b"seconds", b"seconds", b"name")
)
stats = SiteStats.buildstats(data.samples)
grouped = defaultdict(list)
for stat in stats:
grouped[stat.site.filename() + b":" + stat.site.function].append(stat)
# compute sums for each function
functiondata = []
for fname, sitestats in pycompat.iteritems(grouped):
total_cum_sec = 0
total_self_sec = 0
total_percent = 0
for stat in sitestats:
total_cum_sec += stat.totalseconds()
total_self_sec += stat.selfseconds()
total_percent += stat.selfpercent()
functiondata.append(
(fname, total_cum_sec, total_self_sec, total_percent, sitestats)
)
# sort by total self sec
functiondata.sort(reverse=True, key=lambda x: x[2])
for function in functiondata:
if function[3] < 0.05:
continue
fp.write(
b'%6.2f %9.2f %9.2f %s\n'
% (
function[3], # total percent
function[1], # total cum sec
function[2], # total self sec
function[0],
)
) # file:function
function[4].sort(reverse=True, key=lambda i: i.selfseconds())
for stat in function[4]:
# only show line numbers for significant locations (>1% time spent)
if stat.selfpercent() > 1:
source = stat.site.getsource(25)
if sys.version_info.major >= 3 and not isinstance(
source, bytes
):
source = pycompat.bytestr(source)
stattuple = (
stat.selfpercent(),
stat.selfseconds(),
stat.site.lineno,
source,
)
fp.write(b'%33.0f%% %6.2f line %d: %s\n' % stattuple)
def display_about_method(data, fp, function=None, **kwargs):
if function is None:
raise Exception(b"Invalid function")
filename = None
if b':' in function:
filename, function = function.split(b':')
relevant_samples = 0
parents = {}
children = {}
for sample in data.samples:
for i, site in enumerate(sample.stack):
if site.function == function and (
not filename or site.filename() == filename
):
relevant_samples += 1
if i != len(sample.stack) - 1:
parent = sample.stack[i + 1]
if parent in parents:
parents[parent] = parents[parent] + 1
else:
parents[parent] = 1
if site in children:
children[site] = children[site] + 1
else:
children[site] = 1
parents = [(parent, count) for parent, count in pycompat.iteritems(parents)]
parents.sort(reverse=True, key=lambda x: x[1])
for parent, count in parents:
fp.write(
b'%6.2f%% %s:%s line %s: %s\n'
% (
count / relevant_samples * 100,
pycompat.fsencode(parent.filename()),
pycompat.sysbytes(parent.function),
parent.lineno,
pycompat.sysbytes(parent.getsource(50)),
)
)
stats = SiteStats.buildstats(data.samples)
stats = [
s
for s in stats
if s.site.function == function
and (not filename or s.site.filename() == filename)
]
total_cum_sec = 0
total_self_sec = 0
total_self_percent = 0
total_cum_percent = 0
for stat in stats:
total_cum_sec += stat.totalseconds()
total_self_sec += stat.selfseconds()
total_self_percent += stat.selfpercent()
total_cum_percent += stat.totalpercent()
fp.write(
b'\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n\n'
% (
pycompat.sysbytes(filename or b'___'),
pycompat.sysbytes(function),
total_cum_sec,
total_cum_percent,
total_self_sec,
total_self_percent,
)
)
children = [(child, count) for child, count in pycompat.iteritems(children)]
children.sort(reverse=True, key=lambda x: x[1])
for child, count in children:
fp.write(
b' %6.2f%% line %s: %s\n'
% (
count / relevant_samples * 100,
child.lineno,
pycompat.sysbytes(child.getsource(50)),
)
)
def display_hotpath(data, fp, limit=0.05, **kwargs):
class HotNode(object):
def __init__(self, site):
self.site = site
self.count = 0
self.children = {}
def add(self, stack, time):
self.count += time
site = stack[0]
child = self.children.get(site)
if not child:
child = HotNode(site)
self.children[site] = child
if len(stack) > 1:
i = 1
                # Skip boilerplate parts of the stack
while i < len(stack) and stack[i].skipname() in skips:
i += 1
if i < len(stack):
child.add(stack[i:], time)
else:
# Normally this is done by the .add() calls
child.count += time
root = HotNode(None)
lasttime = data.samples[0].time
for sample in data.samples:
root.add(sample.stack[::-1], sample.time - lasttime)
lasttime = sample.time
showtime = kwargs.get('showtime', True)
def _write(node, depth, multiple_siblings):
site = node.site
visiblechildren = [
c
for c in pycompat.itervalues(node.children)
if c.count >= (limit * root.count)
]
if site:
indent = depth * 2 - 1
filename = (site.filename() + b':').ljust(15)
function = site.function
# lots of string formatting
listpattern = (
b''.ljust(indent)
+ (b'\\' if multiple_siblings else b'|')
+ b' %4.1f%%'
+ (b' %5.2fs' % node.count if showtime else b'')
+ b' %s %s'
)
liststring = listpattern % (
node.count / root.count * 100,
filename,
function,
)
# 4 to account for the word 'line'
spacing_len = max(4, 55 - len(liststring))
prefix = b''
if spacing_len == 4:
prefix = b', '
codepattern = b'%s%s %d: %s%s'
codestring = codepattern % (
prefix,
b'line'.rjust(spacing_len),
site.lineno,
b''.ljust(max(0, 4 - len(str(site.lineno)))),
site.getsource(30),
)
finalstring = liststring + codestring
childrensamples = sum(
[c.count for c in pycompat.itervalues(node.children)]
)
# Make frames that performed more than 10% of the operation red
if node.count - childrensamples > (0.1 * root.count):
finalstring = b'\033[91m' + finalstring + b'\033[0m'
# Make frames that didn't actually perform work dark grey
elif node.count - childrensamples == 0:
finalstring = b'\033[90m' + finalstring + b'\033[0m'
fp.write(finalstring + b'\n')
newdepth = depth
if len(visiblechildren) > 1 or multiple_siblings:
newdepth += 1
visiblechildren.sort(reverse=True, key=lambda x: x.count)
for child in visiblechildren:
_write(child, newdepth, len(visiblechildren) > 1)
if root.count > 0:
_write(root, 0, False)
def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
if scriptpath is None:
scriptpath = encoding.environ[b'HOME'] + b'/flamegraph.pl'
if not os.path.exists(scriptpath):
fp.write(b'error: missing %s\n' % scriptpath)
fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n')
return
lines = {}
for sample in data.samples:
sites = [s.function for s in sample.stack]
sites.reverse()
line = b';'.join(sites)
if line in lines:
lines[line] = lines[line] + 1
else:
lines[line] = 1
    fd, path = pycompat.mkstemp()
    # the temporary file is reopened by name below, so the raw descriptor
    # returned by mkstemp is not needed
    os.close(fd)
    with open(path, b"w+") as file:
        for line, count in pycompat.iteritems(lines):
            file.write(b"%s %d\n" % (line, count))
    if outputfile is None:
        outputfile = b'~/flamegraph.svg'
    # use the script path that was checked above rather than assuming the
    # script lives in the home directory
    os.system(b"perl %s %s > %s" % (scriptpath, path, outputfile))
fp.write(b'Written to %s\n' % outputfile)
_pathcache = {}
def simplifypath(path):
"""Attempt to make the path to a Python module easier to read by
removing whatever part of the Python search path it was found
on."""
if path in _pathcache:
return _pathcache[path]
hgpath = encoding.__file__.rsplit(os.sep, 2)[0]
for p in [hgpath] + sys.path:
prefix = p + os.sep
if path.startswith(prefix):
path = path[len(prefix) :]
break
_pathcache[path] = path
return path
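# For example, assuming '/usr/lib/python3/site-packages' appears on sys.path,
# simplifypath('/usr/lib/python3/site-packages/pkg/mod.py') would return
# 'pkg/mod.py' (the paths here are purely illustrative).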
def write_to_json(data, fp):
samples = []
for sample in data.samples:
stack = []
for frame in sample.stack:
stack.append(
(
pycompat.sysstr(frame.path),
frame.lineno,
pycompat.sysstr(frame.function),
)
)
samples.append((sample.time, stack))
data = json.dumps(samples)
if not isinstance(data, bytes):
data = data.encode('utf-8')
fp.write(data)
def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
samples = []
laststack = collections.deque()
lastseen = collections.deque()
# The Chrome tracing format allows us to use a compact stack
# representation to save space. It's fiddly but worth it.
# We maintain a bijection between stack and ID.
stack2id = {}
id2stack = [] # will eventually be rendered
def stackid(stack):
if not stack:
return
if stack in stack2id:
return stack2id[stack]
parent = stackid(stack[1:])
myid = len(stack2id)
stack2id[stack] = myid
id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
if parent is not None:
id2stack[-1].update(parent=parent)
return myid
# The sampling profiler can sample multiple times without
# advancing the clock, potentially causing the Chrome trace viewer
# to render single-pixel columns that we cannot zoom in on. We
# work around this by pretending that zero-duration samples are a
# millisecond in length.
clamp = 0.001
# We provide knobs that by default attempt to filter out stack
# frames that are too noisy:
#
# * A few take almost all execution time. These are usually boring
# setup functions, giving a stack that is deep but uninformative.
#
# * Numerous samples take almost no time, but introduce lots of
# noisy, oft-deep "spines" into a rendered profile.
blacklist = set()
totaltime = data.samples[-1].time - data.samples[0].time
minthreshold = totaltime * minthreshold
maxthreshold = max(totaltime * maxthreshold, clamp)
def poplast():
oldsid = stackid(tuple(laststack))
oldcat, oldfunc = laststack.popleft()
oldtime, oldidx = lastseen.popleft()
duration = sample.time - oldtime
if minthreshold <= duration <= maxthreshold:
# ensure no zero-duration events
sampletime = max(oldtime + clamp, sample.time)
samples.append(
dict(
ph='E',
name=oldfunc,
cat=oldcat,
sf=oldsid,
ts=sampletime * 1e6,
pid=0,
)
)
else:
blacklist.add(oldidx)
# Much fiddling to synthesize correctly(ish) nested begin/end
# events given only stack snapshots.
for sample in data.samples:
stack = tuple(
(
(
'%s:%d'
% (simplifypath(pycompat.sysstr(frame.path)), frame.lineno),
pycompat.sysstr(frame.function),
)
for frame in sample.stack
)
)
qstack = collections.deque(stack)
if laststack == qstack:
continue
while laststack and qstack and laststack[-1] == qstack[-1]:
laststack.pop()
qstack.pop()
while laststack:
poplast()
for f in reversed(qstack):
lastseen.appendleft((sample.time, len(samples)))
laststack.appendleft(f)
path, name = f
sid = stackid(tuple(laststack))
samples.append(
dict(
ph='B',
name=name,
cat=path,
ts=sample.time * 1e6,
sf=sid,
pid=0,
)
)
laststack = collections.deque(stack)
while laststack:
poplast()
events = [
sample for idx, sample in enumerate(samples) if idx not in blacklist
]
frames = collections.OrderedDict(
(str(k), v) for (k, v) in enumerate(id2stack)
)
data = json.dumps(dict(traceEvents=events, stackFrames=frames), indent=1)
if not isinstance(data, bytes):
data = data.encode('utf-8')
fp.write(data)
fp.write(b'\n')
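# Illustrative sketch (the file name is a placeholder): once samples have been
# collected via start()/stop(), the trace written here can be loaded into
# Chrome's trace viewer (chrome://tracing).
#
#     with open('profile.trace', 'wb') as fp:
#         write_to_chrome(state, fp)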
def printusage():
print(
r"""
The statprof command line allows you to inspect the last profile's results in
the following forms:
usage:
    (every command also accepts -f/--file path naming the saved profile data)
hotpath [-l --limit percent]
Shows a graph of calls with the percent of time each takes.
        Red calls take over 10% of the total time themselves.
lines
Shows the actual sampled lines.
functions
Shows the samples grouped by function.
function [filename:]functionname
Shows the callers and callees of a particular function.
flame [-s --script-path] [-o --output-file path]
Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
Requires that ~/flamegraph.pl exist.
(Specify alternate script path with --script-path.)"""
)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
printusage()
return 0
displayargs = {}
optstart = 2
displayargs[b'function'] = None
if argv[1] == 'hotpath':
displayargs[b'format'] = DisplayFormats.Hotpath
elif argv[1] == 'lines':
displayargs[b'format'] = DisplayFormats.ByLine
elif argv[1] == 'functions':
displayargs[b'format'] = DisplayFormats.ByMethod
elif argv[1] == 'function':
displayargs[b'format'] = DisplayFormats.AboutMethod
displayargs[b'function'] = argv[2]
optstart = 3
elif argv[1] == 'flame':
displayargs[b'format'] = DisplayFormats.FlameGraph
else:
printusage()
return 0
# process options
try:
opts, args = pycompat.getoptb(
sys.argv[optstart:],
b"hl:f:o:p:",
[b"help", b"limit=", b"file=", b"output-file=", b"script-path="],
)
except getopt.error as msg:
print(msg)
printusage()
return 2
displayargs[b'limit'] = 0.05
path = None
for o, value in opts:
if o in ("-l", "--limit"):
displayargs[b'limit'] = float(value)
elif o in ("-f", "--file"):
path = value
elif o in ("-o", "--output-file"):
displayargs[b'outputfile'] = value
elif o in ("-p", "--script-path"):
displayargs[b'scriptpath'] = value
elif o in ("-h", "help"):
printusage()
return 0
else:
assert False, b"unhandled option %s" % o
if not path:
print('must specify --file to load')
return 1
load_data(path=path)
display(**pycompat.strkwargs(displayargs))
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
from pandaepl import Model, MovingObject, Avatar, VideoLogQueue, Camera
from panda3d.core import Point3, TransparencyAttrib
from load_models import load_models, get_model
import moBananas as mB
import random
from sys import stdout
from math import sin, cos, radians
def check_repeat(trial_num, original_list):
# used in gobananas
    # this function decides what kind of trial we are setting up;
    # if we are starting a new block of trials, it also decides which trial
    # in the block will be the new repeat trial. Do this first, in case this
    # trial (the first of the new block) is going to be the repeat
repeat_list = original_list[:]
trial_type = ''
if trial_num > 0 and trial_num % repeat_list[0] == 0:
# time to choose the next trial that will be a repeat,
# choose a number from 0 to repeat number and add it to this trial number
repeat_list[2] = trial_num + random.choice(range(repeat_list[0]))
# print('chose trial', repeat_list[2])
# if we are on a now_repeat trial, and now the trial number is less than repeat number,
# it is the first one and we are collecting
if trial_num == repeat_list[1]:
# repeat_list[1] is the trial number for collecting positions
# print 'collecting positions for repeat'
trial_type = 'new'
elif trial_num == repeat_list[2]:
# and now we are repeating
# print 'repeat'
trial_type = 'repeat'
return repeat_list, trial_type
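# Illustrative walk-through (values are made up): with repeat_list = [10, 3, 3],
# trial 3 returns trial_type 'new' so its fruit positions get recorded; at
# trial 10 a fresh repeat trial is drawn from 10..19 and stored in
# repeat_list[2]; when that trial number comes up, trial_type 'repeat' is
# returned so the recorded positions are reused.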
def create_alt_fruit_area(subarea_key=None, alt_subarea=None):
# alternate fruit can be directed to a specific subarea, or can be
# chosen automatically as any section except the one the recall fruit
# is in.
if alt_subarea:
area_list = alt_subarea
else:
area_list = range(1, 10)
        # only drop the recall fruit's subarea if it is a valid key (1-9);
        # keys of None or >= 10 leave the full list intact
        if subarea_key in area_list:
            area_list.remove(subarea_key)
# print('alternate fruit in area:', area_list)
return area_list
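# Illustrative example: create_alt_fruit_area(subarea_key=5) returns the
# subareas [1, 2, 3, 4, 6, 7, 8, 9], while passing alt_subarea explicitly
# returns that list unchanged.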
class Fruit():
def __init__(self, config):
# Used in both goBananas and bananaRecall, sometimes referred to as regular
# and sequential tasks, respectively.
self.config = config
# if this is not a sequential memory task, this might not be set
self.config.setdefault('fruit_to_remember', False)
if self.config['fruit_to_remember']:
# print 'recall task'
# give exact location according to dictionary in config
self.manual = config.get('manual')
self.subarea_key = config.get('subarea', 0)
# bring this into a variable, so we can toggle it.
            # if placement is manual, the fruit repeats until it is given a
            # new area; for random placement it repeats only if configured to
if self.manual:
self.repeat = True
if self.subarea_key > 9:
raise ValueError('manual subareas greater than 9 have no meaning')
else:
self.repeat = config['repeat_recall_fruit']
# print 'fruit manual, repeat, area', self.manual, self.repeat, self.subarea_key
self.new_subarea_key = self.subarea_key
# print('subarea', self.subarea_key)
self.alpha = self.config['alpha']
# print('alpha', self.alpha)
self.num_shows = 0
else:
self.subarea_key = None
# for repeating a particular configuration
# able to toggle this in bananaRecall, so making it a variable in both games to
# simplify things.
self.repeat = self.config.get('fruit_repeat', False) # assume false if none provided
if self.repeat:
start_number = random.choice(range(self.config['repeat_number']))
# repeat_list is a list of variables we care about for repeating trials
# the first two will not change, last one changes each time we enter a new block
# [frequency of repeat, start number, next number]
self.repeat_list = [self.config['repeat_number'], start_number, start_number]
# just for gobananas
if self.config.get('go_alpha', False):
self.alpha = self.config['alpha']
# print('alpha', self.alpha)
# affects alpha in both
if self.config.get('alpha', False):
self.alpha_node_path = None
# variable to keep track of which fruit is alpha (only important for gobananas,
# since alpha fruit is always recall fruit in recall fruit)
self.alpha_fruit = False
# num_fruit dict will tell us how many of each fruit we will be showing
self.num_fruit_dict = {}
# dictionary of actual fruit models
self.fruit_models = {}
# list to keep track of which fruit have shown up
self.fruit_list = []
# keeps track of reward beeps
self.beeps = None
# variable to make sure we don't collide more than once into the same fruit
self.first_collision = True
# variable to save the fruit we ran into most recently
self.current_fruit = None
# dictionary to save positions for repeated trials or single fruit
if self.config.get('fruit_dict', False):
self.pos_dict = self.config['fruit_dict']
else:
self.pos_dict = {}
# print 'start', self.pos_dict
def create_fruit(self, fruit_dict):
self.num_fruit_dict = fruit_dict
# return a fruitModel.
# print 'create bananas'
# print 'dict of number fruit', fruit_dict
# load the models
load_models()
# random alpha?
test_alpha = self.config.get('go_alpha', False)
# for each fruit in our dictionary, find corresponding model,
# create new model for each count in dictionary of that fruit
# This is a couple of loops, fortunately they are all small.
# print 'making fruit dictionary'
for fruit, count in fruit_dict.iteritems():
for i in range(count):
item = get_model('name', fruit)
# print item.model
name = item.name + "%03d" % i
# differentiate the fruit we are remembering,
# if we are doing recall_banana task
if self.config['fruit_to_remember'] and item.name == self.config['fruit_to_remember']:
name = item.name
# print name
# create actual model
self.create_fruit_model(item, name)
# check if we are making a fruit semi-transparent
# will choose the first fruit of the given type
if test_alpha and item.name == test_alpha:
self.alpha_fruit = name
# print self.alpha_fruit
test_alpha = self.set_alpha_fruit(name, True)
# if we are doing recall, set ability to use alpha
if self.config['fruit_to_remember']:
self.set_alpha_fruit(self.config['fruit_to_remember'])
# print self.fruit_models
# print 'end create fruit'
def create_fruit_model(self, item, name):
# initial position does not matter
model = Model.Model(name, item.model, Point3(0, 0, 1), self.collide_fruit)
try:
roll = item.roll
except AttributeError:
roll = 0
model.setHpr(Point3(random.randint(0, 360), 0, roll))
model.setScale(item.scale)
model.name = name
try:
model.retrNodePath().getChild(0).getChild(0).setScale(item.coll_scale)
except AssertionError:
print "no collision sphere detected"
# print model.retrNodePath().getChild(0).getChild(0).getChild(0)
# uncomment to show collision sphere
# model.retrNodePath().getChild(0).getChild(0).getChild(0).show()
# hide all models on creation
model.setStashed(True)
self.fruit_models[name] = model
def setup_gobananas_trial(self, trial_num):
trial_type = ''
if self.repeat:
self.repeat_list, trial_type = check_repeat(trial_num, self.repeat_list)
# print self.repeat_list
if self.config.get('fruit_dict', False):
trial_type = 'repeat'
# print('got stuff back', trial_type)
# print('trial number to be repeated', self.repeat_list[1])
avatar_x_y = self.log_new_trial(trial_type, trial_num)
new_pos_dict = self.setup_fruit_for_trial(avatar_x_y, trial_type)
self.change_positions(new_pos_dict)
def setup_fruit_for_trial(self, avatar_x_y, repeat='No'):
# print('repeat_trial_type', repeat)
# print 'setup fruit for trial'
# get positions for fruit
# if repeat has 'repeat' in it, use same positions as before
# if repeat is 'new', use new positions, save configuration
# pos_list is used to make sure we are not putting fruit too close
# together or too close to the avatar
pos_list = []
# pos_dict is returned so we have a dictionary of the new positions
pos_dict = {}
# make sure start with empty list
self.fruit_list = []
for name, fruit in self.fruit_models.iteritems():
# print name
# print pos_list
# print('repeat', repeat)
overload = False
if repeat == 'repeat':
if name in self.pos_dict:
# get x,y from the dictionary
(x, y) = self.pos_dict[name]
if not mB.check_distances_good(x, y, pos_list, avatar_x_y, self.config):
# print 'too close'
x = None
else:
# print 'no position, get random'
overload = True
if repeat != 'repeat' or overload:
# get new positions
(x, y) = mB.get_random_xy(pos_list, avatar_x_y, self.config)
pos_list.append((x, y))
# print pos_list
# print('current positions', name, x, y)
if x is not None:
pos_dict[name] = (x, y)
self.make_fruit_visible(name, repeat)
if repeat == 'new':
# print 'save new'
# save new banana placements
self.pos_dict = pos_dict
# print 'position', self.pos_dict
# print pos_dict
# print('fruit list', self.fruit_list)
return pos_dict
def start_recall_trial(self, trial_num):
remember, trial_type = self.setup_recall_trial(trial_num)
# print 'setup'
avatar_x_y = self.log_new_trial(trial_type, trial_num)
# print 'log'
new_pos_dict = self.setup_fruit_for_recall_trial(avatar_x_y, trial_type)
# print 'fruit'
self.change_positions(new_pos_dict)
# print 'move fruit'
return remember
def setup_recall_trial(self, trial_num):
# print('recall_repeat this trial is', self.repeat)
# repeat_recall can be toggled with button press
# only go to new place after have been to old place for at least one
# alpha or invisible showing
# need trial_type for determining if repeating, new manual position
# or new random position:
# need to return remember = True, if banana is not full bright (will be searching)
#
# print 'setup recall trial'
# print self.new_subarea_key
# print('num_shows', self.num_shows)
remember = False
if self.num_shows == self.config['num_repeat_visible']:
# print 'required, first trial after visible repeats'
# first trial after required visible repeats is required,
# always a repeat
# always alpha, if set in config
remember = True
if self.config.get('first_fruit_alpha', True):
trial_type = 'repeat_alpha'
else:
trial_type = 'repeat'
elif self.num_shows == 0:
# first trial, so definitely a new position...
remember, trial_type = self.choose_new_recall_trial_type()
elif self.num_shows < self.config['num_repeat_visible']:
# print 'required bright'
trial_type = 'repeat_bright'
elif self.num_shows > self.config['num_repeat_visible']:
# print 'okay to change areas'
# okay to change position
remember, trial_type = self.choose_new_recall_trial_type()
# print('remember', remember)
# print('trial type', trial_type)
self.num_shows += 1
return remember, trial_type
def choose_new_recall_trial_type(self):
# possible to change position of fruit, check to see if we are,
# and if so, how
# print 'change position?'
# print('self.manual', self.manual, 'self.repeat', self.repeat)
reset_num_shows = True
if self.new_subarea_key and self.manual:
trial_type = 'manual_bright'
elif self.new_subarea_key and not self.manual:
# print 'random and new subarea chosen'
trial_type = 'random_bright'
elif not self.manual and not self.repeat:
# print 'random and not repeating, so force new position'
trial_type = 'random_force_bright'
else:
trial_type = 'repeat'
reset_num_shows = False
if reset_num_shows:
            # if resetting num_shows, then this is a new trial,
# so not remembering
self.num_shows = 0
remember = False
else:
remember = True
return remember, trial_type
def setup_fruit_for_recall_trial(self, avatar_x_y, repeat='No'):
# print('repeat_trial_type', repeat)
# print 'setup fruit for trial'
# get positions for fruit
# if repeat has 'repeat' in it, use same positions as before
# (for recall this only applies to the recall fruit)
# if repeat is 'recall' with no repeat, get a new position for
# the recall fruit, from appropriate area
        # fruit_list keeps track of which fruit have been made visible this trial
# pos_list is used to make sure we are not putting fruit too close
# together or too close to the avatar
pos_list = []
# pos_dict is returned, giving us a dictionary of the new positions
# so that we can move the fruit to the appropriate places.
pos_dict = {}
# make sure start with empty list
self.fruit_list = []
# if we are repeating the recall fruit, need to
# keep track, so random positions are placed
# proper distance from it
if 'repeat' in repeat:
# we have a position saved, so go ahead and
# add it to the starting list, so other fruit
# is not assigned too close to it.
pos_list.append(self.pos_dict[self.config['fruit_to_remember']])
elif repeat == 'manual_bright' or repeat == 'random_bright':
# print 'switch subareas'
# we switched subareas
self.subarea_key = self.new_subarea_key
self.new_subarea_key = None
# if specific x, y, go ahead and add to list
if 'manual' in repeat:
pos_list.append(self.config['points'].get(self.subarea_key))
# get alt_area
if self.config.get('alt_subarea'):
# print 'use alt_subarea'
alt_area = create_alt_fruit_area(alt_subarea=self.config['alt_subarea'])
# print alt_area
else:
# print 'use any area'
alt_area = create_alt_fruit_area(self.subarea_key)
# print pos_list
# print 'avatar pos', avatar_x_y
for name, fruit in self.fruit_models.iteritems():
# print name
# print pos_list
# print('repeat', repeat)
if name == self.config['fruit_to_remember']:
if 'repeat' in repeat:
# print 'repeat the same position for the banana again'
# if not a new position, put in the recall fruit position
# we used previously, already added to pos_list, so good.
(x, y) = self.pos_dict[name]
else:
# print 'new recall fruit position'
# print self.manual
# print self.subarea_key
# getting a new position
# send in config with sub areas
if 'manual' in repeat:
# print('switching placement', self.subarea_key)
# print self.config['points']
(x, y) = self.config['points'].get(self.subarea_key)
else:
# print 'get random'
# print('subarea_key', self.subarea_key)
(x, y) = mB.get_random_xy(pos_list, avatar_x_y, self.config, [self.subarea_key])
pos_list.append((x, y))
# always be ready to repeat recall fruit, cheap
self.pos_dict[name] = (x, y)
# make sure we know to show it, since not a repeat
# print('recall fruit position', x, y)
else:
# fruit not remembering is in alternate area, if we have specified an area for the recall
# fruit
(x, y) = mB.get_random_xy(pos_list, avatar_x_y, self.config, alt_area)
pos_list.append((x, y))
# print pos_list
# print('current positions', name, x, y)
pos_dict[name] = (x, y)
self.make_fruit_visible(name, repeat)
# print pos_dict
# print('fruit list', self.fruit_list)
return pos_dict
def log_new_trial(self, trial_type, trial_num):
stdout.write('trial number ' + str(trial_num) + '\n')
# print('trial_type', trial_type)
VideoLogQueue.VideoLogQueue.getInstance().writeLine("NewTrial", [trial_num])
if trial_type == 'new' or 'repeat' in trial_type:
# print trial_type
# print 'log repeat'
VideoLogQueue.VideoLogQueue.getInstance().writeLine("RepeatTrial", [trial_num])
avatar = Avatar.Avatar.getInstance()
avatar_x_y = (avatar.getPos()[0], avatar.getPos()[1])
return avatar_x_y
def change_positions(self, pos_dict):
for key, value in pos_dict.iteritems():
self.fruit_models[key].setPos(Point3(value[0], value[1], 1))
def make_fruit_visible(self, name, repeat=None):
# print 'choose_first_fruit', choose_first_fruit
# fruit indexes are given one at a time,
# if task is remembering fruit,
# create a list of fruit we will be showing consecutively
# after the first fruit, first fruit is visible
# else (for goBananas) make all fruit visible
recall_fruit = self.config['fruit_to_remember']
if recall_fruit:
if name == recall_fruit:
# print 'make fruit visible (or not)'
# print('alpha', self.alpha)
# decide if we have shown required number of times at full bright.
if 'bright' in repeat:
# first x trials solid on, set in config how many
# print 'on solid'
self.change_alpha_fruit('on')
elif 'alpha' in repeat and self.alpha == 0:
# get alpha from config
# print 'reset recall fruit alpha'
self.alpha = self.config['alpha']
# print('changed alpha', self.alpha)
self.change_alpha_fruit('on_alpha')
elif self.alpha > 0:
# if alpha is on, use alpha
# print 'recall fruit alpha'
self.change_alpha_fruit('on_alpha')
else:
print 'recall fruit invisible'
self.change_alpha_fruit('off_alpha')
self.fruit_list.append(name)
else:
self.fruit_list.append(name)
# print('now alpha', self.alpha)
else:
# go bananas
if name == self.alpha_fruit:
self.change_alpha_fruit('on_alpha', name)
self.fruit_models[name].setStashed(False)
self.fruit_list.append(name)
def set_alpha_fruit(self, name, alpha=None):
# set up fruit to be alpha, done at fruit creation
# may also change alpha immediately, if alpha is true
self.alpha_node_path = self.fruit_models[name].retrNodePath()
self.alpha_node_path.setTransparency(TransparencyAttrib.MAlpha)
if alpha:
# print('make a fruit alpha', name, self.config['alpha'])
self.alpha_node_path.setAlphaScale(self.config['alpha'])
# log it
VideoLogQueue.VideoLogQueue.getInstance().writeLine("Alpha", [name + ' ' + str(self.config['alpha'])])
return False # only do one fruit
def collide_fruit(self, collision_info):
"""
Handle the subject colliding with fruit, document fruit collision, subject
freezes, reward is triggered.
@param collision_info:
@return:
"""
# print 'collision'
# print 'what is first_collision now?', self.first_collision
# which fruit we ran into
self.current_fruit = collision_info[0].getInto().getIdentifier()
# print('collision', self.current_fruit)
# print self.first_collision
# check to see if the banana was in the camera view when collided,
# if not, then ignore collision
collided = collision_info[0].getInto()
cam_node_path = Camera.Camera.getDefaultCamera().retrNodePath()
# print collided.retrNodePath().getPos(cam_node_path)
# print collided.retrNodePath().getPos(cam_node_path)
# print cam_node_path.node().isInView(collided.retrNodePath().getPos(cam_node_path))
# Sometimes we collide with a banana multiple times for no damn reason, so setting self.first_collision
# to keep track of whether this is the first collision
# print('collision', self.first_collision)
# print('camera', Camera.Camera.getDefaultCamera().getPos())
# print('collision position', collided.retrNodePath().getPos(cam_node_path))
# for fruit in self.fruit_models:
# print fruit.getPos()
# print('in view', cam_node_path.node().isInView(collided.retrNodePath().getPos(cam_node_path)))
if cam_node_path.node().isInView(collided.retrNodePath().getPos(cam_node_path)) and self.first_collision:
# print 'first collision, in view'
# print self.current_fruit
# cannot run inside of banana - can't I just do this earlier for all of the fruit?
MovingObject.MovingObject.handleRepelCollision(collision_info)
# print 'stop moving'
# Makes it so Avatar cannot turn or go forward
Avatar.Avatar.getInstance().setMaxTurningSpeed(0)
Avatar.Avatar.getInstance().setMaxForwardSpeed(0)
# VideoLogQueue.VideoLogQueue.getInstance().writeLine("Yummy", ['stop moving!'])
# Setting self.beeps to 0 is signal to give reward
self.beeps = 0
# print self.beeps
self.first_collision = False
def disappear_fruit(self):
# print 'disappear fruit'
# fruit that is currently visible is stashed
# print('fruit should go away', self.current_fruit)
# print self.fruit_list
# remove the current fruit from list of possible fruit
self.fruit_list.remove(self.current_fruit)
# for gobananas, show how many fruit are left on field
if not self.config['fruit_to_remember']:
print('number of fruit left this trial ', len(self.fruit_list))
# print 'removed a fruit from the list', self.fruit_list
# stash the fruit we just ran into,
self.fruit_models[self.current_fruit].setStashed(True)
self.reset_collision()
def reset_collision(self):
# print 'reset collision'
# log collected banana
VideoLogQueue.VideoLogQueue.getInstance().writeLine("Finished", [self.current_fruit])
self.first_collision = True
def get_next_fruit(self):
# only for banana recall (sequential fruit!)
# not used for goBananas
# print 'get next fruit'
# print 'fruit gone', self.current_fruit
self.fruit_models[self.fruit_list[0]].setStashed(False)
# print('fruit unstashed', self.fruit_list[0])
def change_alpha_fruit(self, mode=None, fruit=None):
# fruit default is the recall fruit
# mode can be three states, on, alpha_on, off. Default is alpha_on
if mode is None:
mode = 'alpha_on'
if fruit is None:
fruit = self.config['fruit_to_remember']
# for alpha, we only care if we are turning on alpha or on full,
# if turning off, just leave in same state. Otherwise the logs will
# be confusing with alpha changing when fruit disappears.
if 'alpha' in mode:
# print('should be on at this alpha ', self.alpha)
self.alpha_node_path.setAlphaScale(self.alpha)
# log what alpha we flashed at
print('alpha', self.alpha)
# print('fruit', fruit)
VideoLogQueue.VideoLogQueue.getInstance().writeLine("Alpha",
[fruit + ' ' + str(self.alpha)])
elif 'on' in mode:
# print('should be on at this alpha ', 1)
self.alpha_node_path.setAlphaScale(1)
# log we returned to full alpha, should be also stashed at this point,
# but that is logged automatically
VideoLogQueue.VideoLogQueue.getInstance().writeLine("Alpha",
[fruit + ' 1'])
if 'on' in mode:
# turn it on, should already be at correct alpha
self.fruit_models[fruit].setStashed(False)
else:
self.fruit_models[fruit].setStashed(True)
def choose_recall_position(self, subarea_key):
# print('new subarea key', subarea_key)
self.new_subarea_key = subarea_key
def check_distance_to_fruit(self, target_fruit):
# Zowie, appears to be a panda3d method, but I can't
# get to it, because I don't know how to get to the
# panda actor node. :(
avatar = Avatar.Avatar.getInstance()
avatar_pos = (avatar.getPos()[0], avatar.getPos()[1])
banana = self.fruit_models[target_fruit]
banana_pos = (banana.getPos()[0], banana.getPos()[1])
dist_to_banana = mB.get_distance(avatar_pos, banana_pos)
return dist_to_banana
def move_recall_fruit_to_avatar(self):
# 0 heading for avatar is off by 90 degrees from where one
# would expect it to be, so we rotate 90 degrees
# print 'move fruit in front of avatar'
avatar = Avatar.Avatar.getInstance()
avatar_pos = (avatar.getPos()[0], avatar.getPos()[1])
# print avatar_pos
avatar_head = avatar.getH() + 90
heading = radians(avatar_head)
# print avatar_head
x2 = avatar_pos[0] + cos(heading) * 2
y2 = avatar_pos[1] + sin(heading) * 2
# print x2, y2
if self.config['min_x'] < x2 < self.config['max_x'] and self.config['min_y'] < y2 < self.config['max_y']:
recall_fruit = self.fruit_models[self.config['fruit_to_remember']]
# print recall_fruit.getPos()
z = recall_fruit.getPos()[2]
recall_fruit.setPos(Point3(x2, y2, z))
# print 'new position', recall_fruit.getPos()
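# Illustrative sketch (standalone helper, not used by the class above): the
# placement math in move_recall_fruit_to_avatar reduces to stepping a fixed
# distance along the avatar's heading, after the 90-degree correction noted
# in that method.
def _point_ahead(x, y, heading_deg, distance=2.0):
    # rotate into the avatar's frame and step `distance` units forward
    heading = radians(heading_deg + 90)
    return x + cos(heading) * distance, y + sin(heading) * distance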
|
|
# This program is public domain
# Author: Paul Kienzle
# Initial version: William Ratcliff
"""
ICP data reader.
summary(filename) - reads the header information
read(filename) - reads header information and data
"""
import numpy as N
import datetime,sys
# Try using precompiled matrix loader
print "*** icpformat 1"
try:
from . import _reduction
def parsematrix(s, shape=None, linenum=0):
"""
Parse a string into a matrix. Provide a shape parameter if you
know the expected matrix size.
"""
if shape != None:
# Have an existing block, so we know what size to allocate
z = N.empty(shape,'i')
i,j = _reduction.str2imat(s,z)
if i*j != z.size:
raise IOError,"Inconsistent dims at line %d"%linenum
else:
# No existing block. Worst case is 2 bytes per int.
n = int(len(s)/2+1)
z = N.empty(n,'i')
i,j = _reduction.str2imat(s,z)
# Keep the actual size
if i==1 or j==1:
z = z[:i*j].reshape(i*j)
else:
z = z[:i*j].reshape(i,j)
return z
except:
def parsematrix(s,shape=None,linenum=0):
"""
Parse a string into a matrix. Provide a shape parameter if you
know the expected matrix size.
"""
z = N.matrix(s,'i').A
i,j = z.shape
if i==1 or j==1:
z = z.reshape(i*j)
if shape != None and N.any(z.shape != shape):
raise IOError,"Inconsistent dims at line %d"%linenum
return z
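# Illustrative example of the pure-numpy fallback above:
# parsematrix("1,2;3,4") returns array([[1, 2], [3, 4]]), while a single
# row such as "1,2,3" comes back flattened to array([1, 2, 3]).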
def readdata(fh):
"""
Read ICP data, including PSD data if lines contain commas.
"""
rows = []
blocks = []
line = fh.readline().rstrip()
linenum = 1
while line != '':
        # While it might be easy to check for a comment mark at the beginning
        # of the line, supporting this is ill-advised. First, users should
# be strongly discouraged from modifying the original data.
# Second, sequencing against the automatically generated motor
# columns will become more complicated. Let's make life easier
# and put the masking in the application rather than the data reader.
# Process the instrument configuration line and move to the next line
rows.append([float(val) for val in line.split()])
line = fh.readline().rstrip()
linenum += 1
# Build up a multiline detector block by joining all lines that
# contain a comma
b = []
while ',' in line:
b.append(line)
line = fh.readline()
linenum += 1
        # If the last line ended with a comma then the last number for the
        # current block is on the current line.
if b != [] and b[-1].rstrip()[-1] == ",":
b.append(line)
line = fh.readline()
linenum += 1
if b != []:
# Have a detector block so add it
s = "".join(b)
if blocks != []:
z = parsematrix(s, shape=blocks[0].shape, linenum=linenum)
else:
z = parsematrix(s, shape=None, linenum=linenum)
blocks.append(z)
elif blocks != []:
# Oops...missing a detector block. Set it to zero counts
# of the same size as the last block
blocks.append(N.zeros(blocks[-1].shape,'i'))
# Otherwise no detector block and don't need one
# Note: this strategy fails to identify that the first
# detector block is missing; those will be filled in later.
# recover from missing leading detector blocks
if blocks != [] and len(blocks) < len(rows):
blank = N.zeros(blocks[0].shape,'i')
        blocks = [blank]*(len(rows)-len(blocks)) + blocks
# Convert data to arrays
X = N.array(rows, 'd')
Z = N.array(blocks)
return X,Z
def get_tokenized_line(file):
"""
Read the next line of text into a set of words.
"""
line=file.readline()
return line.split()
def get_quoted_tokens(file):
"""
Build a token list from a line which can be a mix of quoted strings
and unquoted values separated by spaces. Uses single quotes only.
Does not test for escaped single quotes.
"""
line = file.readline()
tokens = []
curtoken=None
inquote = False
for c in line:
if c == "'":
if inquote:
tokens.append("".join(curtoken))
curtoken = None
inquote = False
else:
curtoken = []
inquote = True
elif inquote:
curtoken.append(c)
elif c.isspace():
if curtoken != None:
tokens.append("".join(curtoken))
curtoken = None
else:
if curtoken == None:
curtoken = [c]
else:
curtoken.append(c)
return tokens
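# Illustrative example: for an input line of  'abc def' 123  the loop above
# yields the tokens ['abc def', '123'].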
class Lattice(object):
def __str__(self):
return ("a,b,c=%g,%g,%g alpha,beta,gamma=%g,%g,%g"
% (self.a,self.b,self.c,self.alpha,self.beta,self.gamma))
class Motor(object): pass
class MotorSet(object):
def __str__(self):
motornames = self.__dict__.keys()
motornames.sort()
details = [(m,self.__dict__[m].start,self.__dict__[m].stop)
for m in motornames]
return ", ".join(["%s[%g:%g]"%m for m in details])
class ColumnSet(object):
def __getitem__(self, k):
return getattr(self,k)
def __str__(self):
columnnames = self.__dict__.keys()
columnnames.sort()
return ", ".join(columnnames)
class ICP(object):
def __init__(self, path):
self.path = path
def readheader1(self, file):
"""
        Read the two-line summary at the start of the ICP data files.
"""
tokens = get_quoted_tokens(file)
self.filename=tokens[0]
        # strptime is a classmethod, so it can be called on the class directly
        self.date = datetime.datetime.strptime(tokens[1], '%b %d %Y %H:%M')
self.scantype = tokens[2]
self.prefactor = float(tokens[3])
self.monitor=float(tokens[4])
self.count_type=tokens[5]
self.points=int(tokens[6])
self.data_type=tokens[7]
#skip over names of fields
file.readline()
#comment and polarization
line = file.readline()
polarized_index = line.find("F1: O", 52)
if polarized_index > 0:
self.comment = line[:polarized_index].rstrip()
F1 = '+' if line.find("F1: ON", 52)>0 else '-'
F2 = '+' if line.find("F2: ON", 52)>0 else '-'
self.polarization = F1+F2
else:
self.comment = line.rstrip()
self.polarization = ""
def readiheader(self, file):
"""
Read I-buffer structure, excluding motors.
"""
# Read in fields and field names
tokenized=get_tokenized_line(file)
fieldnames = file.readline()
#print tokenized
#print fieldnames
#Collimation Mosaic Wavelength T-Start Incr. H-field #Det
self.collimations = [float(s) for s in tokenized[0:4]]
self.mosaic = [float(s) for s in tokenized[4:7]]
self.wavelength=float(tokenized[7])
self.Tstart=float(tokenized[8])
self.Tstep=float(tokenized[9])
self.Hfield=float(tokenized[10])
def readrheader(self, file):
"""
Read R-buffer structure, excluding motors.
"""
# Read in fields and field names
tokenized=get_tokenized_line(file)
fieldnames = file.readline()
#print tokenized
#print fieldnames
#Mon1 Exp Dm Wavel T-Start Incr. Hf(Tesla) #Det SclFac
self.Mon1=float(tokenized[0])
self.Exp=float(tokenized[1])
self.Dm=float(tokenized[2])
self.wavelength=float(tokenized[3])
self.Tstart=float(tokenized[4])
self.Tstep=float(tokenized[5])
self.Hfield=float(tokenized[6])
self.numDet=float(tokenized[7])
self.SclFac=float(tokenized[8])
def readqheader(self, file):
"""
Read Q-buffer structure (also works for T-buffer).
"""
#experiment info
tokenized=get_tokenized_line(file)
self.collimations=[float(s) for s in tokenized[0:4]]
self.mosaic=[float(s) for s in tokenized[4:7]]
orient1=[float(s) for s in tokenized[7:10]]
#ignore the "angle" field
orient2=[float(s) for s in tokenized[11:14]]
#skip line with field names
file.readline()
tokenized=get_tokenized_line(file)
lattice=Lattice()
lattice.a=float(tokenized[0])
lattice.b=float(tokenized[1])
lattice.c=float(tokenized[2])
lattice.alpha=float(tokenized[3])
lattice.beta=float(tokenized[4])
lattice.gamma=float(tokenized[5])
self.lattice=lattice
#skip line with field names
file.readline()
tokenized=get_tokenized_line(file)
self.ecenter=float(tokenized[0])
self.deltae=float(tokenized[1])
self.ef=float(tokenized[2])
self.monochromator_dspacing=float(tokenized[3])
self.analyzer_dspacing=float(tokenized[4])
self.tstart=float(tokenized[5])
self.tstep=float(tokenized[6])
tokenized=get_tokenized_line(file)
self.Efixed=tokenized[4]
tokenized=get_tokenized_line(file)
self.qcenter=[float(s) for s in tokenized[0:3]]
self.qstep=[float(s) for s in tokenized[3:6]]
self.hfield=float(tokenized[6])
#skip line describing fields
file.readline()
def check_wavelength(self, default, overrides):
"""
        ICP sometimes records the incorrect wavelength in the file. Make
        sure the right value is being used. Be annoying about it so that
        if the wavelength was changed for a legitimate reason the user can
        override. self.wavelength is the value read from the file; the
        default argument should already hold the instrument's default wavelength.
"""
dataset = self.filename[:5]
wavelength = self.wavelength
if dataset in overrides:
# yuck! If already overridden for a particular file in
# a dataset, override for all files in the dataset.
wavelength = overrides[dataset]
message("Using wavelength %s for %s"%(wavelength,dataset))
elif wavelength == 0:
# yuck! If stored value is 0, use the default
wavelength = default
message("Using default wavelength %s for %s"\
%(wavelength,self.path))
elif abs(default-wavelength)/default > 0.01:
# yuck! Value differs significantly from the default
if question("ICP recorded a wavelength of %s in %s. \
Do you want to use the default wavelength %s instead?"\
%(wavelength,self.path,default)):
wavelength = default
# Regardless of how the value was obtained, use that value for
# the entire dataset
return wavelength
def readmotors(self, file):
"""
        Read the 6 motor lines, storing the start-step-stop values
        for each motor on self.motor.
        E.g.,
        self.readmotors(file)
        print self.motor.a1.start
"""
self.motor = MotorSet()
while True: # read until 'Mot:' line
words=get_tokenized_line(file)
if words[0] == 'Mot:': break
motor = Motor()
motor.start=float(words[1])
motor.step=float(words[2])
motor.stop=float(words[3])
name = words[0] if not words[0].isdigit() else 'a'+words[0]
setattr(self.motor,name,motor)
def readcolumnheaders(self, file):
"""
Get a list of column names. Transform the names of certain
columns to make our lives easier elsewhere:
#1 COUNTS -> counts
#2 COUNTS -> counts2
MON -> monitor
MIN -> time
Q(x) -> qx, Q(y) -> qy, Q(z) -> qz
        All column names are converted to lowercase.
"""
line = file.readline()
line = line.lower()
for (old,new) in (('#1 counts','counts'),
('#2 counts','counts2'),
(' mon ',' monitor '),
(' min ',' time '),
('(',''),
(')',''),
):
line = line.replace(old,new)
self.columnnames = line.split()
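    # Illustrative example (the header text is made up): a raw header line of
    #   " Q(X)  Q(Y)  Q(Z)  #1 COUNTS  MIN  MON "
    # becomes columnnames == ['qx', 'qy', 'qz', 'counts', 'time', 'monitor']
    # after the substitutions above.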
def readcolumns(self, file):
'''
        Read and parse the ICP data columns, storing one vector per column
        name on self.column. If using a position sensitive detector,
        self.detector holds an array of detector values x scan points.
'''
values,detector = readdata(file)
self.column = ColumnSet()
for (c,v) in zip(self.columnnames,values.T):
setattr(self.column,c,v)
self.detector = detector
self.counts = detector if detector.size > 0 else self.column.counts
self.points = len(self.column.counts)
def genmotorcolumns(self):
"""
Generate vectors for each of the motors if a vector is not
already stored in the file.
"""
if self.scantype in ['T']: return # Skip motor generation for now for 'T'
for (M,R) in self.motor.__dict__.iteritems():
if not hasattr(self.column,M):
if R.step != 0.:
                    vector = N.arange(R.start, R.stop, R.step)
# truncate to number of points measured
vector = vector[:self.points]+0
else:
vector = R.start * N.ones(self.points)
setattr(self.column,M,vector)
pass
def parseheader(self, file):
"""
Read and parse ICP header information
"""
# Determine FileType
self.readheader1(file)
if self.scantype=='I':
self.readiheader(file)
self.readmotors(file)
elif self.scantype in ['Q','T']:
self.readqheader(file)
elif self.scantype=='B':
self.readqheader(file)
self.readmotors(file)
elif self.scantype=='R':
self.readrheader(file)
self.readmotors(file)
else:
raise ValueError, "Unknown scantype %s in ICP file"%self.scantype
self.readcolumnheaders(file)
def summary(self):
"""
        Read header from file, setting the corresponding attributes of the ICP object
"""
file = gzopen(self.path)
self.parseheader(file)
data1 = file.readline()
data2 = file.readline()
self.PSD = (',' in data2)
file.close()
def read(self):
"""
        Read header and data from file, setting the corresponding attributes of the ICP object
"""
file = gzopen(self.path)
self.parseheader(file)
#read columns and detector images if available
self.readcolumns(file)
self.PSD = (self.detector.size>0)
# fill in missing motor columns
self.genmotorcolumns()
file.close()
def __contains__(self, column):
return hasattr(self.column,column)
def counts(self):
if self.detector.size > 1:
return self.detector
else:
return self.column.counts
def write_icp_header(file, icpfile):
    raise NotImplementedError
def _write_icp_frame(file, frame):
# Round data to the nearest integer
frame = N.asarray(frame+0.5,'uint32')
if frame.ndim == 2:
rows = [ ",".join(str(v) for v in row) for row in frame ]
text = ";".join(rows)
else:
text = ",".join(str(v) for v in frame)
file.write(' ')
offset = 0
while len(text)-offset > 78:
next = offset+78
while text[next] not in ",;":
next -= 1
file.write(text[offset:next+1])
file.write(' '*(78-(next-offset)))
file.write('\n ')
offset = next+1
file.write(text[offset:])
file.write('\n')
def write_icp_data(file, formats, columns, detector=None):
"""
Write the data portion of the icp file.
"""
for i in range(len(columns[0])):
fields = [f%columns[k][i] for k,f in enumerate(formats)]
file.write(' '.join(fields))
file.write('\n')
if detector != None and detector.size > 0:
_write_icp_frame(file,detector[i])
def replace_data(infilename, outfilename, columns, detector=None):
infile = open(infilename,'r')
outfile = open(outfilename, 'w')
    # Copy the header through the ' Mot:' line
while True:
line = infile.readline()
outfile.write(line)
if line.startswith(' Mot:'): break
# Copy column headers
line = infile.readline()
outfile.write(line)
# Guess output format from the first line of the data
line = infile.readline()
formats = []
width = 0
precision = 0
increment_precision = False
in_number = False
for c in line[:-1]:
width += 1
if c == ' ':
if in_number:
formats.append('%'+str(width-1)+'.'+str(precision)+'f')
width = 0
precision = 0
increment_precision = False
in_number = False
elif c == '.':
increment_precision = True
elif c.isdigit():
in_number = True
if increment_precision:
precision+=1
formats.append('%'+str(width)+'.'+str(precision)+'f')
write_icp_data(outfile, formats, columns, detector)
def read(filename):
"""Read an ICP file and return the corresponding ICP file object"""
icp = ICP(filename)
icp.read()
return icp
def summary(filename):
"""Read an ICP file header and return the corresponding ICP file object"""
icp = ICP(filename)
icp.summary()
return icp
def gzopen(filename,mode='r'):
"""
Open file or gzip file
"""
if filename.endswith('.gz'):
import gzip
file = gzip.open(filename, mode)
else:
file = open(filename, mode)
return file
def asdata(icp):
import data
d = data.Data()
d.vlabel = 'Counts'
d.v = icp.counts
d.xlabel = icp.columnnames[0].capitalize()
d.x = icp.column[icp.columnnames[0]]
if len(d.v.shape) > 1:
d.ylabel = 'Pixel'
d.y = N.arange(d.v.shape[0])
return d
def data(filename):
icp = ICP(filename)
icp.read()
return asdata(icp)
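# Illustrative usage sketch (the file name is a placeholder):
#
#     icp = read('example.icp')       # header plus data
#     print icp.columnnames
#     print icp.column.counts[:5]
#     print icp.motor                 # only set for buffers that have motor lines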
# TODO: need message/question functions
def message(text): pass
def question(text): return True
def copy_test():
import sys
if len(sys.argv) < 2:
print "usage: python icpformat.py file"
sys.exit()
filename = sys.argv[1]
icp = ICP(filename)
icp.read()
columns = [icp.column[n] for n in icp.columnnames]
replace_data(filename,'copy.icp',columns,detector=icp.detector)
def demo():
"""
    Read each file named on the command line and print its fields
"""
import sys
if len(sys.argv) < 2:
print "usage: python icpformat.py file*"
for file in sys.argv[1:]:
fields = read(file)
keys = fields.__dict__.keys()
keys.sort()
for k in keys: print k,getattr(fields,k)
def plot(filename):
"""
    Read the given ICP file and plot its data
"""
import pylab
canvas = pylab.gcf().canvas
d = data(filename)
if len(d.v.shape) > 2:
pylab.gca().pcolormesh(d.v[0,:,:])
pylab.xlabel(d.xlabel)
pylab.ylabel(d.ylabel)
elif len(d.v.shape) > 1:
if filename.lower().endswith('bt4'):
offset=1
else:
offset=0
pylab.gca().pcolorfast(d.v[:,offset:])
pylab.xlabel(d.xlabel)
pylab.ylabel(d.ylabel)
else:
pylab.plot(d.x,d.v)
pylab.xlabel(d.xlabel)
pylab.ylabel(d.vlabel)
pylab.show()
def plot_demo():
import sys
if len(sys.argv) != 2:
print "usage: python icpformat.py file"
else:
plot(sys.argv[1])
if __name__=='__main__':
plot_demo()
#demo()
#copy_test()
|
|
import os
import oscar
# Path helper
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
DEBUG = os.environ.get('DEBUG', 'true') != 'false'
SQL_DEBUG = DEBUG
ALLOWED_HOSTS = [
'latest.oscarcommerce.com',
'master.oscarcommerce.com'
]
# This is needed for the hosted version of the sandbox
ADMINS = (
('David Winterbottom', '[email protected]'),
('Michael van Tellingen', '[email protected]'),
)
EMAIL_SUBJECT_PREFIX = '[Oscar sandbox] '
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
# Use a Sqlite database by default
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('DATABASE_NAME', location('db.sqlite')),
'USER': os.environ.get('DATABASE_USER', None),
'PASSWORD': os.environ.get('DATABASE_PASSWORD', None),
'HOST': os.environ.get('DATABASE_HOST', None),
'PORT': os.environ.get('DATABASE_PORT', None),
'ATOMIC_REQUESTS': True
}
}
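# The engine and credentials above are read from the environment, so the
# sandbox can be pointed at another database without editing this file.
# Illustrative example (the values are placeholders, not real credentials):
#
#   DATABASE_ENGINE=django.db.backends.postgresql_psycopg2
#   DATABASE_NAME=oscar_sandbox
#   DATABASE_USER=oscar
#   DATABASE_PASSWORD=changeme
#   DATABASE_HOST=localhost
#   DATABASE_PORT=5432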
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
USE_TZ = True
TIME_ZONE = 'Europe/London'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# Includes all languages that have >50% coverage in Transifex
# Taken from Django's default setting for LANGUAGES
gettext_noop = lambda s: s
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('en-gb', gettext_noop('British English')),
('el', gettext_noop('Greek')),
('es', gettext_noop('Spanish')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('it', gettext_noop('Italian')),
('ko', gettext_noop('Korean')),
('nl', gettext_noop('Dutch')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public/static')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (
location('static/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
location('_site/templates'),
oscar.OSCAR_MAIN_TEMPLATE_DIR,
],
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
],
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
],
'debug': DEBUG,
}
}
]
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# Allow languages to be selected
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# Ensure a valid basket is added to the request instance for every request
'oscar.apps.basket.middleware.BasketMiddleware',
# Enable the ProfileMiddleware, then add ?cprofile to any
# URL path to print out profile details
#'oscar.profiling.middleware.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send everything to the
# console; no emails are sent to the site admins.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s',
},
'simple': {
'format': '[%(asctime)s] %(message)s'
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console'],
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'oscar': {
'level': 'DEBUG',
'propagate': True,
},
'oscar.catalogue.import': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'oscar.alerts': {
'handlers': ['null'],
'level': 'INFO',
'propagate': False,
},
# Django loggers
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'level': 'WARNING',
'propagate': True,
},
# Third party
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sorl.thumbnail': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django_extensions',
# Debug toolbar + extensions
'debug_toolbar',
'apps.gateway', # For allowing dashboard access
'widget_tweaks',
] + oscar.get_core_apps()
# Add Oscar's custom auth backend so users can sign in using their email
# address.
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
# ====================
# Messages contrib app
# ====================
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': location('whoosh_index'),
},
}
# Here's a sample Haystack config if using Solr (which is recommended)
#HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': u'http://127.0.0.1:8983/solr/oscar_latest/',
# 'INCLUDE_SPELLING': True
# },
#}
# =============
# Debug Toolbar
# =============
# Implicit setup can often lead to problems with circular imports, so we
# explicitly wire up the toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
INTERNAL_IPS = ['127.0.0.1', '::1']
# ==============
# Oscar settings
# ==============
from oscar.defaults import *
# Meta
# ====
OSCAR_SHOP_TAGLINE = 'Sandbox'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
# This is added to each template context by the core context processor. It is
# useful for test/stage/qa sites where you want to show the version of the site
# in the page title.
DISPLAY_VERSION = False
# Order processing
# ================
# Sample order/line status settings. This is quite simplistic. It's likely you'll
# want to override the set_status method on the order object to do more
# sophisticated things.
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
# This dict defines the new order statuses that an order can move to
OSCAR_ORDER_STATUS_PIPELINE = {
'Pending': ('Being processed', 'Cancelled',),
'Being processed': ('Complete', 'Cancelled',),
'Cancelled': (),
'Complete': (),
}
# This dict defines the line statuses that will be set when an order's status
# is changed
OSCAR_ORDER_STATUS_CASCADE = {
'Being processed': 'Being processed',
'Cancelled': 'Cancelled',
'Complete': 'Shipped',
}
# LESS/CSS
# ========
# We default to using CSS files, rather than the LESS files that generate them.
# If you want to develop Oscar's CSS, then set USE_LESS=True to enable the
# on-the-fly less processor.
USE_LESS = False
# Sentry
# ======
if os.environ.get('SENTRY_DSN'):
RAVEN_CONFIG = {'dsn': os.environ.get('SENTRY_DSN')}
LOGGING['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
LOGGING['root']['handlers'].append('sentry')
INSTALLED_APPS.append('raven.contrib.django.raven_compat')
# Sorl
# ====
THUMBNAIL_DEBUG = True
THUMBNAIL_KEY_PREFIX = 'oscar-sandbox'
# Django 1.6 switched to JSON session serialization for security reasons, but
# JSON cannot serialize model instances. We should resolve this by extending
# django/core/serializers/json with a serializer that provides a suitable
# `dumps` method (a sketch follows below). The same applies to tests/config.py.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
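# A minimal, hypothetical sketch of such a serializer (illustrative only, not
# part of Oscar; the class name is made up). Falling back to DjangoJSONEncoder
# covers dates, Decimals and UUIDs, though model instances would still need
# explicit handling:
#
# from django.contrib.sessions.serializers import JSONSerializer
# from django.core.serializers.json import DjangoJSONEncoder
# import json
#
# class ExtendedJSONSerializer(JSONSerializer):
#     def dumps(self, obj):
#         return json.dumps(obj, cls=DjangoJSONEncoder,
#                           separators=(',', ':')).encode('latin-1')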
# Try and import local settings which can be used to override any of the above.
try:
from settings_local import *
except ImportError:
pass
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
import six
import testtools
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import mistral as client
from heat.engine import resource
from heat.engine import resources
from heat.engine.resources.openstack.mistral import workflow
from heat.engine.resources import signal_responder
from heat.engine.resources import stack_user
from heat.engine import scheduler
from heat.engine import stack as stack_parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
mistral_client = importutils.try_import('mistralclient.api.base')
executions = importutils.try_import('mistralclient.api.v2.executions')
workflow_template = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
tasks:
- name: hello
action: std.echo output='Good morning!'
publish:
result: <% $.hello %>
"""
workflow_template_with_params = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
params: {'test':'param_value'}
type: direct
tasks:
- name: hello
action: std.echo output='Good morning!'
publish:
result: <% $.hello %>
"""
workflow_template_with_params_override = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
params: {'test':'param_value_override','test1':'param_value_override_1'}
type: direct
tasks:
- name: hello
action: std.echo output='Good morning!'
publish:
result: <% $.hello %>
"""
workflow_template_full = """
heat_template_version: 2013-05-23
resources:
create_vm:
type: OS::Mistral::Workflow
properties:
name: create_vm
type: direct
input:
name: create_test_server
image: 31d8eeaf-686e-4e95-bb27-765014b9f20b
flavor: 2
output:
vm_id: <% $.vm_id %>
tasks:
- name: create_server
action: |
nova.servers_create name=<% $.name %> image=<% $.image %>
flavor=<% $.flavor %>
publish:
vm_id: <% $.create_server.id %>
on_success:
- check_server_exists
- name: check_server_exists
action: nova.servers_get server=<% $.vm_id %>
publish:
server_exists: True
on_success:
- wait_instance
- name: wait_instance
action: nova.servers_find id=<% $.vm_id %> status='ACTIVE'
policies:
retry:
delay: 5
count: 15
"""
workflow_template_bad = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
tasks:
- name: second_task
action: std.noop
requires: [first_task]
- name: first_task
action: std.noop
"""
workflow_template_bad_reverse = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: reverse
tasks:
- name: second_task
action: std.noop
requires: [first_task]
- name: first_task
action: std.noop
"""
workflow_template_update_replace = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
name: hello_action
type: direct
tasks:
- name: hello
action: std.echo output='Good evening!'
publish:
result: <% $.hello %>
"""
workflow_template_update = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
description: just testing workflow resource
tasks:
- name: hello
action: std.echo output='Good evening!'
publish:
result: <% $.hello %>
"""
class FakeWorkflow(object):
def __init__(self, name):
self.name = name
class MistralWorkFlowTestResource(workflow.Workflow):
@classmethod
def is_service_available(cls, context):
return True
class TestMistralWorkflow(common.HeatTestCase):
def setUp(self):
super(TestMistralWorkflow, self).setUp()
resources.initialise()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
tmpl = template_format.parse(workflow_template)
self.stack = utils.parse_stack(tmpl, stack_name='test_stack')
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['workflow']
self.mistral = mock.Mock()
self.patchobject(MistralWorkFlowTestResource, 'mistral',
return_value=self.mistral)
self.patches = []
self.patches.append(mock.patch.object(stack_user.StackUser,
'_create_user'))
self.patches.append(mock.patch.object(signal_responder.SignalResponder,
'_create_keypair'))
self.patches.append(mock.patch.object(client,
'mistral_base'))
self.patches.append(mock.patch.object(client.MistralClientPlugin,
'_create'))
for patch in self.patches:
patch.start()
self.client = client.MistralClientPlugin(self.ctx)
def tearDown(self):
super(TestMistralWorkflow, self).tearDown()
for patch in self.patches:
patch.stop()
def _create_resource(self, name, snippet, stack):
wf = MistralWorkFlowTestResource(name, snippet, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.create)()
return wf
def test_create(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
expected_state = (wf.CREATE, wf.COMPLETE)
self.assertEqual(expected_state, wf.state)
self.assertEqual('test_stack-workflow-b5fiekfci3yc', wf.resource_id)
def test_create_with_name(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
expected_state = (wf.CREATE, wf.COMPLETE)
self.assertEqual(expected_state, wf.state)
self.assertEqual('create_vm', wf.resource_id)
def test_attributes(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.assertEqual({'name': 'test_stack-workflow-b5fiekfci3yc',
'input': None}, wf.FnGetAtt('data'))
self.assertEqual([], wf.FnGetAtt('executions'))
def test_direct_workflow_validation_error(self):
error_msg = ("Mistral resource validation error: "
"workflow.properties.tasks.second_task.requires: "
"task second_task contains property 'requires' "
"in case of direct workflow. Only reverse workflows "
"can contain property 'requires'.")
self._test_validation_failed(workflow_template_bad, error_msg)
def test_wrong_params_using(self):
error_msg = ("Mistral resource validation error: "
"workflow.properties.params: 'task_name' is not assigned "
"in 'params' in case of reverse type workflow.")
self._test_validation_failed(workflow_template_bad_reverse, error_msg)
def _test_validation_failed(self, templatem, error_msg):
tmpl = template_format.parse(templatem)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
exc = self.assertRaises(exception.StackValidationFailed,
wf.validate)
self.assertEqual(error_msg, six.text_type(exc))
def test_create_wrong_definition(self):
tmpl = template_format.parse(workflow_template)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.side_effect = Exception('boom!')
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.create))
expected_state = (wf.CREATE, wf.FAILED)
self.assertEqual(expected_state, wf.state)
self.assertIn('Exception: resources.workflow: boom!',
six.text_type(exc))
def test_update_replace(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
t = template_format.parse(workflow_template_update_replace)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_workflow = rsrc_defns['workflow']
new_workflows = [FakeWorkflow('hello_action')]
self.mistral.workflows.update.return_value = new_workflows
self.mistral.workflows.delete.return_value = None
err = self.assertRaises(resource.UpdateReplace,
scheduler.TaskRunner(wf.update,
new_workflow))
msg = 'The Resource workflow requires replacement.'
self.assertEqual(msg, six.text_type(err))
def test_update(self):
wf = self._create_resource('workflow', self.rsrc_defn,
self.stack)
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
self.mistral.workflows.update.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.update, new_wf)()
self.assertTrue(self.mistral.workflows.update.called)
self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
def test_update_failed(self):
wf = self._create_resource('workflow', self.rsrc_defn,
self.stack)
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
self.mistral.workflows.update.side_effect = Exception('boom!')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.update, new_wf))
self.assertEqual((wf.UPDATE, wf.FAILED), wf.state)
def test_delete(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
def test_delete_no_data(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
wf.data_delete('executions')
self.assertEqual([], wf.FnGetAtt('executions'))
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
def test_delete_not_found(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.mistral.workflows.delete.side_effect = (
self.mistral.mistral_base.APIException(error_code=404))
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
@mock.patch.object(resource.Resource, 'client_plugin')
def test_delete_other_errors(self, mock_plugin):
"""We mock client_plugin for returning correct mistral client."""
mock_plugin.return_value = self.client
client.mistral_base.APIException = exception.Error
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.mistral.workflows.delete.side_effect = (Exception('boom!'))
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.delete))
self.assertEqual((wf.DELETE, wf.FAILED), wf.state)
self.assertIn('boom!', six.text_type(exc))
def test_resource_mapping(self):
mapping = workflow.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(workflow.Workflow,
mapping['OS::Mistral::Workflow'])
def test_signal_failed(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'flavor': '3'}}
self.mistral.executions.create.side_effect = Exception('boom!')
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
self.assertEqual('Exception: resources.create_vm: boom!',
six.text_type(err))
def test_signal_wrong_input_and_params_type(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: resources.create_vm: "
"Signal data error: Input in"
" signal data must be a map, find a <type 'str'>")
self.assertEqual(error_message, six.text_type(err))
details = {'params': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: resources.create_vm: "
"Signal data error: Params "
"must be a map, find a <type 'str'>")
self.assertEqual(error_message, six.text_type(err))
def test_signal_wrong_input_key(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'1': '3'}}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: resources.create_vm: "
"Signal data error: Unknown input 1")
self.assertEqual(error_message, six.text_type(err))
@testtools.skipIf(executions is None,
'Uses the actual mistral client')
def test_signal_and_delete_with_executions(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'flavor': '3'}}
execution = mock.Mock()
execution.id = '12345'
# Invoke the real create method (bug 1453539)
exec_manager = executions.ExecutionManager(wf.client('mistral'))
self.mistral.executions.create.side_effect = (
lambda *args, **kw: exec_manager.create(*args, **kw))
self.patchobject(exec_manager, '_create', return_value=execution)
scheduler.TaskRunner(wf.signal, details)()
self.assertEqual({'executions': '12345'}, wf.data())
scheduler.TaskRunner(wf.delete)()
self.assertEqual(1, self.mistral.executions.delete.call_count)
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
def test_workflow_params(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'flavor': '3'},
'params': {'test': 'param_value', 'test1': 'param_value_1'}}
execution = mock.Mock()
execution.id = '12345'
self.mistral.executions.create.side_effect = (
lambda *args, **kw: self.verify_params(*args, **kw))
scheduler.TaskRunner(wf.signal, details)()
def test_workflow_params_merge(self):
tmpl = template_format.parse(workflow_template_with_params)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('workflow')]
scheduler.TaskRunner(wf.create)()
details = {'params': {'test1': 'param_value_1'}}
execution = mock.Mock()
execution.id = '12345'
self.mistral.executions.create.side_effect = (
lambda *args, **kw: self.verify_params(*args, **kw))
scheduler.TaskRunner(wf.signal, details)()
def test_workflow_params_override(self):
tmpl = template_format.parse(workflow_template_with_params_override)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('workflow')]
scheduler.TaskRunner(wf.create)()
details = {'params': {'test': 'param_value', 'test1': 'param_value_1'}}
execution = mock.Mock()
execution.id = '12345'
self.mistral.executions.create.side_effect = (
lambda *args, **kw: self.verify_params(*args, **kw))
scheduler.TaskRunner(wf.signal, details)()
def verify_params(self, workflow_name, workflow_input=None, **params):
self.assertEqual({'test': 'param_value', 'test1': 'param_value_1'},
params)
execution = mock.Mock()
execution.id = '12345'
return execution
@testtools.skipIf(mistral_client is not None,
'Tests mistral client not installed')
def test_no_client(self):
tmpl = template.Template((template_format.parse(workflow_template)))
stack = stack_parser.Stack(utils.dummy_context(), 'foo', tmpl)
self.assertRaises(exception.ResourceTypeNotFound, stack.validate)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle
import weakref
import pyarrow as pa
import pytest
class UuidType(pa.ExtensionType):
def __init__(self):
pa.ExtensionType.__init__(self, pa.binary(16))
def __reduce__(self):
return UuidType, ()
class ParamExtType(pa.ExtensionType):
def __init__(self, width):
self.width = width
pa.ExtensionType.__init__(self, pa.binary(width))
def __reduce__(self):
return ParamExtType, (self.width,)
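# __reduce__ above lets both extension types be reconstructed from a pickle
# payload, which is also how they round-trip through Arrow IPC (exercised by
# the pickling and IPC tests below).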
def ipc_write_batch(batch):
stream = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
writer.close()
return stream.getvalue()
def ipc_read_batch(buf):
reader = pa.RecordBatchStreamReader(buf)
return reader.read_next_batch()
def test_ext_type_basics():
ty = UuidType()
assert ty.extension_name == "arrow.py_extension_type"
def test_ext_type__lifetime():
ty = UuidType()
wr = weakref.ref(ty)
del ty
assert wr() is None
def test_ext_type__storage_type():
ty = UuidType()
assert ty.storage_type == pa.binary(16)
assert ty.__class__ is UuidType
ty = ParamExtType(5)
assert ty.storage_type == pa.binary(5)
assert ty.__class__ is ParamExtType
def test_uuid_type_pickle():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = UuidType()
ser = pickle.dumps(ty, protocol=proto)
del ty
ty = pickle.loads(ser)
wr = weakref.ref(ty)
assert ty.extension_name == "arrow.py_extension_type"
del ty
assert wr() is None
def test_ext_type_equality():
a = ParamExtType(5)
b = ParamExtType(6)
c = ParamExtType(6)
assert a != b
assert b == c
d = UuidType()
e = UuidType()
assert a != d
assert d == e
def test_ext_array_basics():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
arr.validate()
assert arr.type is ty
assert arr.storage.equals(storage)
def test_ext_array_lifetime():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
refs = [weakref.ref(ty), weakref.ref(arr), weakref.ref(storage)]
del ty, storage, arr
for ref in refs:
assert ref() is None
def test_ext_array_errors():
ty = ParamExtType(4)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
with pytest.raises(TypeError, match="Incompatible storage type"):
pa.ExtensionArray.from_storage(ty, storage)
def test_ext_array_equality():
storage1 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage2 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage3 = pa.array([], type=pa.binary(16))
ty1 = UuidType()
ty2 = ParamExtType(16)
a = pa.ExtensionArray.from_storage(ty1, storage1)
b = pa.ExtensionArray.from_storage(ty1, storage2)
assert a.equals(b)
c = pa.ExtensionArray.from_storage(ty1, storage3)
assert not a.equals(c)
d = pa.ExtensionArray.from_storage(ty2, storage1)
assert not a.equals(d)
e = pa.ExtensionArray.from_storage(ty2, storage2)
assert d.equals(e)
f = pa.ExtensionArray.from_storage(ty2, storage3)
assert not d.equals(f)
def test_ext_array_pickling():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
ser = pickle.dumps(arr, protocol=proto)
del ty, storage, arr
arr = pickle.loads(ser)
arr.validate()
assert isinstance(arr, pa.ExtensionArray)
assert arr.type == ParamExtType(3)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
def example_batch():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
return pa.RecordBatch.from_arrays([arr], ["exts"])
def check_example_batch(batch):
arr = batch.column(0)
assert isinstance(arr, pa.ExtensionArray)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
return arr
def test_ipc():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
def test_ipc_unknown_type():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
orig_type = ParamExtType
try:
# Simulate the original Python type being unavailable.
# Deserialization should not fail but return a placeholder type.
del globals()['ParamExtType']
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
# Can be serialized again
buf2 = ipc_write_batch(batch)
del batch, arr
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
finally:
globals()['ParamExtType'] = orig_type
# Deserialize again with the type restored
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
class Seq2SeqTest(tf.test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
_, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell1, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that when num_decoder_symbols is provided, the size of the decoder
# output is num_decoder_symbols.
with tf.variable_scope("decoder_symbols_seq2seq"):
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp_dict = {}
dec_inp_dict["0"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec_inp_dict["1"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
dec_symbols_dict = {"0": 5, "1": 6}
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
dec_inp_dict2["1"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [tf.constant(i + 0.5, shape=[2, output_classes])
for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
def testModelWithBucketsScopeAndLoss(self):
"""Test that variable scope reuse is not reset after model_with_buckets."""
classes = 10
buckets = [(4, 4), (8, 8)]
with self.test_session():
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24)
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
per_example_loss=per_example_loss)
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
# Now check that we did not accidentally set reuse.
self.assertEqual(False, tf.get_variable_scope().reuse)
# Construct one more model with per-example loss.
tf.get_variable_scope().reuse_variables()
_, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
# The first loss is a scalar, the second one is a 1-dimensional tensor.
self.assertEqual([], losses1[0].get_shape().as_list())
self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = tf.get_variable("proj_w", [24, classes])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = tf.all_variables()
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = tf.gradients(losses[i], params)
grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([tf.initialize_all_variables()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length],
out[:length], o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=tf.Graph()) as sess:
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
dec_op_fp_false, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(tf.initialize_all_variables())
# We only check consistency between the variables that exist in both
# models (feed_previous=True and feed_previous=False). Variables created
# by the loop_function in the feed_previous=True model are ignored.
v_false_name_dict = {v.name.split('/', 1)[-1]: v
for v in variables_fp_false}
matched_variables = [(v, v_false_name_dict[v.name.split('/', 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(tf.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false,
{holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
dec_inp_fp_false)})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
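# A minimal illustrative sketch (not part of the original test): the numpy-only
# core of the consistency check above. Given the "<Go>" symbols and the logits
# produced with feed_previous=True, it rebuilds the decoder inputs that the
# feed_previous=False model is fed, mirroring ForwardBackward above. The helper
# name is hypothetical.
def _build_dec_inp_from_previous_outputs(go_symbols, dec_logits_fp_true):
  # go_symbols: int array of shape [batch_size] holding the "<Go>" symbol.
  # dec_logits_fp_true: array of shape
  #   [num_dec_timesteps, batch_size, num_decoder_symbols].
  output_symbols = np.argmax(dec_logits_fp_true, axis=2)
  # Feed "<Go>" first, then the previously generated symbols, dropping the
  # final generated symbol (it has no successor to predict).
  return np.vstack((go_symbols, output_symbols[:-1]))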
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
def JoinPath(*args):
return os.path.normpath(os.path.join(*args))
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None, compatible_sdks=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
compatible_sdks = compatible_sdks or []
compatible_sdks.sort(key=lambda v: float(v.replace('v', '')), reverse=True)
self.compatible_sdks = compatible_sdks
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def _SetupScriptInternal(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
assert target_arch in ('x86', 'x64'), "target_arch not supported"
# If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the
# depot_tools build tools and should run SetEnv.Cmd to set up the
# environment. The check for WindowsSDKDir alone is not sufficient because
# this is set by running vcvarsall.bat.
sdk_dir = os.environ.get('WindowsSDKDir', '')
setup_path = JoinPath(sdk_dir, 'Bin', 'SetEnv.Cmd')
if self.sdk_based and sdk_dir and os.path.exists(setup_path):
return [setup_path, '/' + target_arch]
is_host_arch_x64 = (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'
)
# For VS2017 (and newer) it's fairly easy
if self.short_name >= '2017':
script_path = JoinPath(self.path,
'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')
# Always use a native executable, cross-compiling if necessary.
host_arch = 'amd64' if is_host_arch_x64 else 'x86'
msvc_target_arch = 'amd64' if target_arch == 'x64' else 'x86'
arg = host_arch
if host_arch != msvc_target_arch:
arg += '_' + msvc_target_arch
return [script_path, arg]
# We try to find the best version of the env setup batch.
vcvarsall = JoinPath(self.path, 'VC', 'vcvarsall.bat')
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and \
is_host_arch_x64:
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [vcvarsall, 'amd64_x86']
else:
# Otherwise, the standard x86 compiler. We don't use VC/vcvarsall.bat
# for x86 because vcvarsall calls vcvars32, which it can only find if
# VS??COMNTOOLS is set, which isn't guaranteed.
return [JoinPath(self.path, 'Common7', 'Tools', 'vsvars32.bat')]
elif target_arch == 'x64':
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express edition and
# we're running on a 64bit OS.
if self.short_name[-1] != 'e' and is_host_arch_x64:
arg = 'amd64'
return [vcvarsall, arg]
def SetupScript(self, target_arch):
script_data = self._SetupScriptInternal(target_arch)
script_path = script_data[0]
if not os.path.exists(script_path):
raise Exception('%s is missing - make sure VC++ tools are installed.' %
script_path)
return script_data
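# Illustrative sketch only (not part of gyp): how a caller might query the
# environment-setup command for a version object. The install path below is a
# hypothetical example; SetupScript raises if the batch file is missing.
def _ExampleSetupScriptUsage():
  vs2017 = VisualStudioVersion(
      '2017', 'Visual Studio 2017',
      solution_version='12.00', project_version='15.0',
      flat_sln=False, uses_vcxproj=True,
      path=r'C:\Program Files (x86)\Microsoft Visual Studio\2017\Community',
      sdk_based=False, default_toolset='v141')
  # On an x64 host this returns [<path to VC\Auxiliary\Build\vcvarsall.bat>,
  # 'amd64']; on an x86 host the argument becomes 'x86_amd64'.
  return vs2017.SetupScript('x64')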
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to remain
Python-neutral; cygwin Python, for instance, lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up, and on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail under 64-bit Python because it is a virtual
directory; in that case System32 works correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError as e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
try:
import _winreg as winreg
except ImportError:
import winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
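# Illustrative sketch only (not part of gyp): the typical lookup performed by
# _DetectVisualStudioVersions below -- read a VS install directory, returning
# None on non-Windows hosts or when the key is absent.
def _ExampleRegistryLookup():
  return _RegistryGetValue(
      r'HKLM\Software\Microsoft\VisualStudio\14.0', 'InstallDir')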
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2019': VisualStudioVersion('2019',
'Visual Studio 2019',
solution_version='12.00',
project_version='15.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v142',
compatible_sdks=['v8.1', 'v10.0']),
'2017': VisualStudioVersion('2017',
'Visual Studio 2017',
solution_version='12.00',
project_version='15.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v141',
compatible_sdks=['v8.1', 'v10.0']),
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
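# Illustrative sketch only (not part of gyp): _CreateVersion is a plain table
# lookup, so an Express flavour can be requested directly. A None path is
# allowed; it simply skips normalization.
def _ExampleCreateVersion():
  vs = _CreateVersion('2013e', None, sdk_based=True)
  # flat_sln is True for Express editions, and the default toolset is v120.
  return vs.FlatSolution(), vs.DefaultToolset()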
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
2017 - Visual Studio 2017 (15)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
'15.0': '2017'
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only the SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version == '15.0':
if os.path.exists(path):
versions.append(_CreateVersion('2017', path))
elif version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
allow_fallback: Whether to return a default version if no installed
version could be detected.
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('15.0', '14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
'2017': ('15.0',),
'2019': ('16.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
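# Illustrative sketch only (not part of gyp): typical generator-side use. With
# GYP_MSVS_VERSION unset this auto-detects an installed version and, because
# allow_fallback defaults to True, returns a 2005 stub when nothing is found.
def _ExampleSelectVersion():
  vs = SelectVisualStudioVersion('auto')
  return vs.ShortName(), vs.ProjectExtension()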
|
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import excutils
from oslo_utils import strutils
from manila import exception
from manila.i18n import _
from manila.share.drivers.huawei import constants
class SmartPartition(object):
def __init__(self, helper):
self.helper = helper
def add(self, opts, fsid):
if not strutils.bool_from_string(opts['huawei_smartpartition']):
return
if not opts['partitionname']:
raise exception.InvalidInput(
reason=_('Partition name is None, please set '
'huawei_smartpartition:partitionname in key.'))
partition_id = self.helper._get_partition_id_by_name(
opts['partitionname'])
if not partition_id:
raise exception.InvalidInput(
reason=_('Can not find partition id.'))
self.helper._add_fs_to_partition(fsid, partition_id)
class SmartCache(object):
def __init__(self, helper):
self.helper = helper
def add(self, opts, fsid):
if not strutils.bool_from_string(opts['huawei_smartcache']):
return
if not opts['cachename']:
raise exception.InvalidInput(
reason=_('Illegal value specified for cache.'))
cache_id = self.helper._get_cache_id_by_name(opts['cachename'])
if not cache_id:
raise exception.InvalidInput(
reason=(_('Can not find cache id by cache name %(name)s.')
% {'name': opts['cachename']}))
self.helper._add_fs_to_cache(fsid, cache_id)
class SmartQos(object):
def __init__(self, helper):
self.helper = helper
def create_qos(self, qos, fs_id):
policy_id = None
try:
# Check QoS priority.
if self._check_qos_high_priority(qos):
self.helper.change_fs_priority_high(fs_id)
# Create QoS policy and activate it.
(qos_id, fs_list) = self.helper.find_available_qos(qos)
if qos_id is not None:
self.helper.add_share_to_qos(qos_id, fs_id, fs_list)
else:
policy_id = self.helper.create_qos_policy(qos, fs_id)
self.helper.activate_deactivate_qos(policy_id, True)
except exception.InvalidInput:
with excutils.save_and_reraise_exception():
if policy_id is not None:
self.helper.delete_qos_policy(policy_id)
def _check_qos_high_priority(self, qos):
"""Check QoS priority."""
for key, value in qos.items():
if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
return True
return False
def delete_qos(self, qos_id):
qos_info = self.helper.get_qos_info(qos_id)
qos_status = qos_info['RUNNINGSTATUS']
if qos_status != constants.STATUS_QOS_INACTIVATED:
self.helper.activate_deactivate_qos(qos_id, False)
self.helper.delete_qos_policy(qos_id)
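# Illustrative sketch only (not part of the driver): _check_qos_high_priority
# looks only at the key names, so it can be exercised without a live helper.
# MIN*/LATENCY keys mark a protection (high-priority) policy; MAX* keys do not.
def _example_check_qos_priority():
    qos = SmartQos(None)
    return (qos._check_qos_high_priority({'MINIOPS': '100'}),   # True
            qos._check_qos_high_priority({'MAXIOPS': '100'}))   # False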
class SmartX(object):
def __init__(self, helper):
self.helper = helper
def get_smartx_extra_specs_opts(self, opts):
opts = self.get_capabilities_opts(opts, 'dedupe')
opts = self.get_capabilities_opts(opts, 'compression')
opts = self.get_smartprovisioning_opts(opts)
opts = self.get_smartcache_opts(opts)
opts = self.get_smartpartition_opts(opts)
opts = self.get_sectorsize_opts(opts)
qos = self.get_qos_opts(opts)
return opts, qos
def get_capabilities_opts(self, opts, key):
if strutils.bool_from_string(opts[key]):
opts[key] = True
else:
opts[key] = False
return opts
def get_smartprovisioning_opts(self, opts):
thin_provision = opts.get('thin_provisioning')
if (thin_provision is None or
strutils.bool_from_string(thin_provision)):
opts['LUNType'] = constants.ALLOC_TYPE_THIN_FLAG
else:
opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG
return opts
def get_smartcache_opts(self, opts):
if strutils.bool_from_string(opts['huawei_smartcache']):
if not opts['cachename']:
raise exception.InvalidInput(
reason=_('Cache name is None, please set '
'huawei_smartcache:cachename in key.'))
else:
opts['cachename'] = None
return opts
def get_smartpartition_opts(self, opts):
if strutils.bool_from_string(opts['huawei_smartpartition']):
if not opts['partitionname']:
raise exception.InvalidInput(
reason=_('Partition name is None, please set '
'huawei_smartpartition:partitionname in key.'))
else:
opts['partitionname'] = None
return opts
def get_sectorsize_opts(self, opts):
value = None
if strutils.bool_from_string(opts.get('huawei_sectorsize')):
value = opts.get('sectorsize')
if not value:
root = self.helper._read_xml()
sectorsize = root.findtext('Filesystem/SectorSize')
if sectorsize:
sectorsize = sectorsize.strip()
value = sectorsize
if value:
if value not in constants.VALID_SECTOR_SIZES:
raise exception.InvalidInput(
reason=(_('Illegal value(%s) specified for sectorsize: '
'set to either 4, 8, 16, 32 or 64.') % value))
else:
opts['sectorsize'] = int(value)
return opts
def get_qos_opts(self, opts):
qos = {}
if not strutils.bool_from_string(opts.get('qos')):
return
for key, value in opts.items():
if (key in constants.OPTS_QOS_VALUE) and value is not None:
if (key.upper() != 'IOTYPE') and (int(value) <= 0):
err_msg = (_('QoS config is wrong. %(key)s'
' must be set greater than 0.')
% {'key': key})
raise exception.InvalidInput(reason=err_msg)
elif ((key.upper() == 'IOTYPE')
and (value not in ['0', '1', '2'])):
raise exception.InvalidInput(
reason=(_('Illegal value specified for IOTYPE: '
'set to either 0, 1, or 2.')))
else:
qos[key.upper()] = value
if len(qos) <= 1 or 'IOTYPE' not in qos:
msg = (_('QoS config is incomplete. Please set more. '
'QoS policy: %(qos_policy)s.')
% {'qos_policy': qos})
raise exception.InvalidInput(reason=msg)
lowerlimit = constants.QOS_LOWER_LIMIT
upperlimit = constants.QOS_UPPER_LIMIT
if (set(lowerlimit).intersection(set(qos))
and set(upperlimit).intersection(set(qos))):
msg = (_('QoS policy conflict, both protection policy and '
'restriction policy are set. '
'QoS policy: %(qos_policy)s ')
% {'qos_policy': qos})
raise exception.InvalidInput(reason=msg)
return qos
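# Illustrative sketch only (not part of the driver): how SmartX turns share
# type extra specs into driver options. The values below are assumptions for
# demonstration; helper=None suffices because the XML sectorsize fallback is
# only consulted when huawei_sectorsize is enabled, and qos='False' makes
# get_qos_opts return early with None.
def _example_get_smartx_opts():
    opts = {
        'dedupe': 'True',
        'compression': 'False',
        'thin_provisioning': 'True',
        'huawei_smartcache': 'False',
        'huawei_smartpartition': 'False',
        'huawei_sectorsize': 'False',
        'qos': 'False',
    }
    # Returns the normalized opts (booleans coerced, LUNType set to the thin
    # allocation flag, cachename/partitionname set to None) and qos=None.
    return SmartX(None).get_smartx_extra_specs_opts(opts)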
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import webob
import webob.exc
from cinder.api import common
from cinder import test
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""Unit tests for the `cinder.api.common.limited` method.
This method takes in a list of items and, depending on the 'offset'
and 'limit' GET params, returns a subset or complete set of the given
items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
"""Test offset key works with 0."""
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
"""Test offset key works with a medium sized number."""
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
"""Test offset key works with a number over 1000 (max_limit)."""
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
"""Test offset key works with a blank offset."""
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""Test offset key works with a BAD offset."""
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
"""Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
"""Test limit of zero."""
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_bad(self):
"""Test with a bad limit."""
req = webob.Request.blank(u'/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_limit_medium(self):
"""Test limit of 10."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
"""Test limit of 3000."""
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
"""Test request with both limit and offset."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
"""Test a max_limit other than 1000."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
"""Test a negative limit."""
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""Test a negative offset."""
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
"""Unit tests for `cinder.api.common.get_pagination_params` method.
This method takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_nonnumerical_limit(self):
"""Test nonnumerical limit param."""
req = webob.Request.blank('/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_no_params(self):
"""Test no params."""
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
"""Test valid marker param."""
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
"""Test valid limit param."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
"""Test invalid limit param."""
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
"""Test valid limit and marker parameters."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
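# Illustrative sketch only (not part of the test suite): direct use of the two
# helpers exercised above, outside the unittest harness.
def _example_common_helpers():
    req = webob.Request.blank(
        '/?limit=20&marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
    params = common.get_pagination_params(req)
    # params == {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2', 'limit': 20}
    href = common.remove_version_from_href('http://www.testsite.com/v1.1/images')
    # href == 'http://www.testsite.com/images'
    return params, href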
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import common.models.base
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
import common.fields
import uuid
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Facility',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'This is the official name of the facility', unique=True, max_length=100)),
('code', common.fields.SequenceField(help_text=b'A sequential number allocated to each facility', unique=True, editable=False, blank=True)),
('abbreviation', models.CharField(help_text=b'A short name for the facility.', max_length=30, null=True, blank=True)),
('description', models.TextField(help_text=b'A brief summary of the Facility', null=True, blank=True)),
('location_desc', models.TextField(help_text=b'This field allows a more detailed description of how to locate the facility e.g Joy medical clinic is in Jubilee Plaza 7th Floor', null=True, blank=True)),
('number_of_beds', models.PositiveIntegerField(default=0, help_text=b'The number of beds that a facility has. e.g 0')),
('number_of_cots', models.PositiveIntegerField(default=0, help_text=b'The number of cots that a facility has e.g 0')),
('open_whole_day', models.BooleanField(default=False, help_text=b'Is the facility open 24 hours a day?')),
('open_whole_week', models.BooleanField(default=False, help_text=b'Is the facility open the entire week?')),
('is_classified', models.BooleanField(default=False, help_text=b"Should the facility geo-codes be visible to the public? Certain facilities are kept 'off-the-map'")),
('is_published', models.BooleanField(default=False, help_text=b'Confirmation by the CHRIO that the facility is okay')),
('attributes', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'facilities',
'permissions': (('view_classified_facilities', 'Can see classified facilities'), ('publish_facilities', 'Can publish facilities')),
},
bases=(common.models.base.SequenceMixin, models.Model),
),
migrations.CreateModel(
name='FacilityApproval',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('comment', models.TextField()),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(to='facilities.Facility')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityContact',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('contact', models.ForeignKey(to='common.Contact', on_delete=django.db.models.deletion.PROTECT)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(to='facilities.Facility', on_delete=django.db.models.deletion.PROTECT)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityOperationState',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('reason', models.TextField(help_text=b'Additional information for the transition', null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(related_name='facility_operation_states', to='facilities.Facility')),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityRegulationStatus',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('reason', models.TextField(help_text=b'An explanation as to why the facility is being put in the particular status', null=True, blank=True)),
('license_number', models.CharField(help_text=b'The license number that the facility has been given by the regulator', max_length=100, null=True, blank=True)),
('is_confirmed', models.BooleanField(default=False, help_text=b'Has the proposed change been confirmed by higher authorities')),
('is_cancelled', models.BooleanField(default=False, help_text=b'Has the proposed change been cancelled by a higher authority')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(related_name='regulatory_details', on_delete=django.db.models.deletion.PROTECT, to='facilities.Facility')),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'facility regulation statuses',
},
),
migrations.CreateModel(
name='FacilityService',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('is_confirmed', models.BooleanField(default=False, help_text=b'Indicates whether a service has been approved by the CHRIO')),
('is_cancelled', models.BooleanField(default=False, help_text=b'Indicates whether a service has been cancelled by the CHRIO')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(to='facilities.Facility')),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityStatus',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'A short name representing the operation status e.g OPERATIONAL', unique=True, max_length=100)),
('description', models.TextField(help_text=b'A short explanation of what the status entails.', null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'facility statuses',
},
),
migrations.CreateModel(
name='FacilityType',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'A short unique name for the facility type e.g DISPENSARY', unique=True, max_length=100)),
('sub_division', models.CharField(help_text=b'This is a further division of the facility type e.g Hospitals can be further divided into District hospitals and Provincial Hospitals.', max_length=100, null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityUnit',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField(help_text=b'A short summary of the facility unit.')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(to='facilities.Facility', on_delete=django.db.models.deletion.PROTECT)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityUpgrade',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('reason', models.TextField()),
('is_confirmed', models.BooleanField(default=False, help_text=b'Indicates whether a facility upgrade or downgrade has been confirmed')),
('is_cancelled', models.BooleanField(default=False, help_text=b'Indicates whether a facility upgrade or downgrade has been cancelled or not')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(related_name='facility_upgrades', to='facilities.Facility')),
('facility_type', models.ForeignKey(to='facilities.FacilityType')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='JobTitle',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'A short name for the job title', max_length=100)),
('description', models.TextField(help_text=b'A short summary of the job title', null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='Officer',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'The name of the officer in charge e.g Roselyne Wiyanga', max_length=255)),
('id_number', models.CharField(help_text=b'The National Identity number of the officer', max_length=10, null=True, blank=True)),
('registration_number', models.CharField(help_text=b'This is the licence number of the officer. e.g for a nurse use the NCK registration number.', max_length=100, null=True, blank=True)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'officers in charge',
},
),
migrations.CreateModel(
name='OfficerContact',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('contact', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='common.Contact', help_text=b'The contact of the officer in charge, be it email, mobile number etc')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('officer', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='facilities.Officer', help_text=b'This is the officer in charge')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='Option',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('value', models.TextField()),
('display_text', models.CharField(max_length=30)),
('is_exclusive_option', models.BooleanField(default=True)),
('option_type', models.CharField(max_length=12, choices=[(b'BOOLEAN', b'Yes/No or True/False responses'), (b'INTEGER', b'Integral numbers e.g 1,2,3'), (b'DECIMAL', b'Decimal numbers, may have a fraction e.g 3.14'), (b'TEXT', b'Plain text')])),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='Owner',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'The name of owner e.g Ministry of Health.', unique=True, max_length=100)),
('description', models.TextField(help_text=b'A brief summary of the owner.', null=True, blank=True)),
('code', common.fields.SequenceField(help_text=b'A unique number to identify the owner. Could be up to 7 characters long.', unique=True, editable=False, blank=True)),
('abbreviation', models.CharField(help_text=b'Short form of the name of the owner e.g Ministry of health could be shortened as MOH', max_length=30, null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
bases=(models.Model, common.models.base.SequenceMixin),
),
migrations.CreateModel(
name='OwnerType',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'Short unique name for a particular type of owners. e.g INDIVIDUAL', max_length=100)),
('description', models.TextField(help_text=b'A brief summary of the particular type of owner.', null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='RegulatingBody',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'The name of the regulating body', unique=True, max_length=100)),
('abbreviation', models.CharField(help_text=b'A short form of the name of the regulating body e.g Nursing Council of Kenya could be abbreviated as NCK.', max_length=50, null=True, blank=True)),
('regulation_verb', models.CharField(max_length=100)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'regulating bodies',
},
),
migrations.CreateModel(
name='RegulatingBodyContact',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('contact', models.ForeignKey(to='common.Contact')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('regulating_body', models.ForeignKey(to='facilities.RegulatingBody')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='RegulationStatus',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'A short unique name representing a state/stage of regulation e.g. PENDING_OPENING ', unique=True, max_length=100)),
('description', models.TextField(help_text=b"A short description of the regulation state e.g PENDING_LICENSING could be described as 'waiting for the license to begin operating'", null=True, blank=True)),
('is_initial_state', models.BooleanField(default=False, help_text=b'Indicates whether it is the very first state in the regulation workflow.')),
('is_final_state', models.BooleanField(default=False, help_text=b'Indicates whether it is the last state in the regulation work-flow')),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('next_status', models.ForeignKey(related_name='next_state', blank=True, to='facilities.RegulationStatus', help_text=b'The regulation_status succeeding this regulation status.', null=True)),
('previous_status', models.ForeignKey(related_name='previous_state', blank=True, to='facilities.RegulationStatus', help_text=b'The regulation_status preceding this regulation status.', null=True)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'regulation_statuses',
},
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(unique=True, max_length=255)),
('description', models.TextField(null=True, blank=True)),
('abbreviation', models.CharField(help_text=b'A short form for the service e.g FANC for Focused Antenatal Care', max_length=50, null=True, blank=True)),
('code', common.fields.SequenceField(unique=True, editable=False, blank=True)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'services',
},
bases=(common.models.base.SequenceMixin, models.Model),
),
migrations.CreateModel(
name='ServiceCategory',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('name', models.CharField(help_text=b'What is the name of the category? ', max_length=100)),
('description', models.TextField(null=True, blank=True)),
('abbreviation', models.CharField(help_text=b'A short form of the category e.g ANC for antenatal', max_length=50, null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
'verbose_name_plural': 'service categories',
},
),
migrations.CreateModel(
name='ServiceOption',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('option', models.ForeignKey(to='facilities.Option')),
('service', models.ForeignKey(to='facilities.Service')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.CreateModel(
name='ServiceRating',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('cleanliness', models.BooleanField(default=True)),
('attitude', models.BooleanField(default=True)),
('will_return', models.BooleanField(default=True)),
('occupation', models.CharField(max_length=100)),
('comment', models.TextField(null=True, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('facility_service', models.ForeignKey(to='facilities.FacilityService')),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
),
migrations.AddField(
model_name='service',
name='category',
field=models.ForeignKey(help_text=b'The classification that the service lies in.', to='facilities.ServiceCategory'),
),
migrations.AddField(
model_name='service',
name='created_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='service',
name='options',
field=models.ManyToManyField(to='facilities.Option', through='facilities.ServiceOption'),
),
migrations.AddField(
model_name='service',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='regulatingbody',
name='contacts',
field=models.ManyToManyField(to='common.Contact', through='facilities.RegulatingBodyContact'),
),
migrations.AddField(
model_name='regulatingbody',
name='created_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='regulatingbody',
name='regulatory_body_type',
            field=models.ForeignKey(blank=True, to='facilities.OwnerType', help_text=b'Shows the kind of institutions that the body regulates, e.g. private facilities', null=True),
),
migrations.AddField(
model_name='regulatingbody',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='owner',
name='owner_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='facilities.OwnerType', help_text=b'The classification of the owner e.g INDIVIDUAL'),
),
migrations.AddField(
model_name='owner',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='officer',
name='contacts',
field=models.ManyToManyField(help_text=b'Personal contacts of the officer in charge', to='common.Contact', through='facilities.OfficerContact'),
),
migrations.AddField(
model_name='officer',
name='created_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='officer',
name='job_title',
field=models.ForeignKey(to='facilities.JobTitle', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='officer',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facilityunit',
name='regulating_body',
field=models.ForeignKey(to='facilities.RegulatingBody'),
),
migrations.AddField(
model_name='facilityunit',
name='regulation_status',
field=models.ForeignKey(to='facilities.RegulationStatus'),
),
migrations.AddField(
model_name='facilityunit',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facilityservice',
name='selected_option',
field=models.ForeignKey(to='facilities.ServiceOption'),
),
migrations.AddField(
model_name='facilityservice',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facilityregulationstatus',
name='regulating_body',
field=models.ForeignKey(to='facilities.RegulatingBody', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='facilityregulationstatus',
name='regulation_status',
field=models.ForeignKey(to='facilities.RegulationStatus', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='facilityregulationstatus',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facilityoperationstate',
name='operation_status',
            field=models.ForeignKey(help_text=b'Indicates whether the facility has been approved to operate, is operating, is temporarily non-operational, or is closed down', to='facilities.FacilityStatus'),
),
migrations.AddField(
model_name='facilityoperationstate',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facility',
name='contacts',
field=models.ManyToManyField(help_text=b'Facility contacts - email, phone, fax, postal etc', to='common.Contact', through='facilities.FacilityContact'),
),
migrations.AddField(
model_name='facility',
name='created_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facility',
name='facility_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='facilities.FacilityType', help_text=b'This depends on who owns the facility. For MOH facilities, the type is the gazetted classification of the facility. For non-MOH facilities, check under the respective owners.'),
),
migrations.AddField(
model_name='facility',
name='officer_in_charge',
field=models.ForeignKey(blank=True, to='facilities.Officer', help_text=b'The officer in charge of the facility', null=True),
),
migrations.AddField(
model_name='facility',
name='operation_status',
            field=models.ForeignKey(blank=True, to='facilities.FacilityStatus', help_text=b'Indicates whether the facility has been approved to operate, is operating, is temporarily non-operational, or is closed down', null=True),
),
migrations.AddField(
model_name='facility',
name='owner',
field=models.ForeignKey(help_text=b'A link to the organization that owns the facility', to='facilities.Owner'),
),
migrations.AddField(
model_name='facility',
name='parent',
field=models.ForeignKey(blank=True, to='facilities.Facility', help_text=b'Indicates the umbrella facility of a facility', null=True),
),
migrations.AddField(
model_name='facility',
name='physical_address',
field=models.ForeignKey(blank=True, to='common.PhysicalAddress', help_text=b'Postal and courier addressing for the facility', null=True),
),
migrations.AddField(
model_name='facility',
name='updated_by',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='facility',
name='ward',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='common.Ward', help_text=b'County ward in which the facility is located'),
),
migrations.AlterUniqueTogether(
name='facilitytype',
unique_together=set([('name', 'sub_division')]),
),
]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connect to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
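# The sketch below is illustrative only and is not used by the relay: it shows
# how the "shadowsocks UDP Request (before encrypted)" layout documented above
# (ATYP | DST.ADDR | DST.PORT | DATA) could be packed for an IPv4 destination.
# The helper name and the IPv4-only assumption are mine; the real parsing and
# packing are done by shadowsocks.common.parse_header() and pack_addr().
def _example_pack_ipv4_udp_request(dest_ip, dest_port, payload):
    # 0x01 is the SOCKS5 ATYP value for an IPv4 address; the port is packed
    # as a 2-byte big-endian integer and is followed by the raw payload.
    return (b'\x01' + socket.inet_aton(dest_ip) +
            struct.pack('>H', dest_port) + payload)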
def client_key(source_addr, server_af):
# notice this is server af, not dest af
ret_str = '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
logging.debug('client key:%s' % ret_str)
return ret_str
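# For example (illustrative values), client_key(('10.0.0.2', 53124),
# socket.AF_INET) returns '10.0.0.2:53124:2' on platforms where AF_INET == 2.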
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
logging.debug('in udp relay init, conf %s ' % config)
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
        # ports to redirect: requests arriving on these server ports are
        # passed to parse_header() with rewriting enabled (see _handle_server)
self.rewrite_port_list = {
13099:True,
8382:True,
13098:True,
13097:True,
13096:True}
def _get_a_server(self):
logging.debug('udp _get_a_server')
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
logging.debug('udp _close_client')
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
logging.debug('udp _handle_server')
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
should_rewrite = False
is_trail_port = self.rewrite_port_list.get(self._config['server_port'],\
False)
if is_trail_port:
should_rewrite = True
logging.debug('udp. server port[%d] should rewrite' % \
self._config['server_port'])
header_result = parse_header(data, should_rewrite)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
logging.debug('udp _handle_client')
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
            # this packet is from an address we don't know about;
            # simply drop it
pass
def add_to_loop(self, loop):
logging.debug('udp add_to_loop')
if self._eventloop:
            raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
logging.debug('udp handle_event')
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
logging.debug('udp handle_periodic')
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import collections
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovoo_base
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields as obj_fields
from nova.openstack.common import versionutils
from nova import utils
LOG = logging.getLogger('object')
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_' + name
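# For example, get_attrname('host') returns '_host', which is where the
# property machinery built below stores the field's value on the instance.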
def make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in six.iteritems(cls.fields):
if not isinstance(field, obj_fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
# Note(yjiang5): _from_db_object() may iterate
# every field and write, no exception in such situation.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
raise
def deleter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
raise AttributeError('No such attribute `%s' % name)
delattr(self, get_attrname(name))
setattr(cls, name, property(getter, setter, deleter))
class NovaObjectMetaclass(type):
"""Metaclass that allows tracking of object classes."""
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This means this is a base class using the metaclass. I.e.,
# the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
return
def _vers_tuple(obj):
return tuple([int(x) for x in obj.VERSION.split(".")])
# Add the subclass to NovaObject._obj_classes. If the
# same version already exists, replace it. Otherwise,
# keep the list with newest version first.
make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(cls._obj_classes[obj_name]):
if cls.VERSION == obj.VERSION:
cls._obj_classes[obj_name][i] = cls
# Update nova.objects with this newer class.
setattr(objects, obj_name, cls)
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
cls._obj_classes[obj_name].insert(i, cls)
if i == 0:
# Later version than we've seen before. Update
# nova.objects.
setattr(objects, obj_name, cls)
break
else:
cls._obj_classes[obj_name].append(cls)
# Either this is the first time we've seen the object or it's
            # an older version than anything we've seen. Update nova.objects
# only if it's the first time we've seen this object name.
if not hasattr(objects, obj_name):
setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object;
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
if args and isinstance(args[0], context.RequestContext):
raise exception.ObjectActionError(
action=fn.__name__,
reason='Calling remotables with context is deprecated')
if self._context is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
self._context, self, fn.__name__, args, kwargs)
for key, value in six.iteritems(updates):
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since NovaObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, NovaObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
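# Illustrative usage sketch (the method shown is an assumption, not something
# defined in this module): on a NovaObject subclass,
#
#     @remotable
#     def save(self):
#         ...
#
# runs locally when NovaObject.indirection_api is None and is forwarded to the
# indirection service via object_action() when it is set; likewise,
# @remotable_classmethod forwards through object_class_action().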
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
return '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in cls._obj_classes:
LOG.error(_LE('Unable to instantiate unregistered object type '
'%(objtype)s'), dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
versionutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = cls._obj_classes[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
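    # An example primitive (object and field names are illustrative) looks
    # like:
    #
    #     {'nova_object.namespace': 'nova',
    #      'nova_object.name': 'Widget',
    #      'nova_object.version': '1.0',
    #      'nova_object.data': {'id': 1},
    #      'nova_object.changes': ['id']}
    #
    # which mirrors what obj_to_primitive() below produces.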
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def obj_calculate_child_version(self, target_version, child):
"""Calculate the appropriate version for a child object.
This is to be used when backporting an object for an older client.
A sub-object will need to be backported to a suitable version for
the client as well, and this method will calculate what that
version should be, based on obj_relationships.
:param target_version: Version this object is being backported to
:param child: The child field for which the appropriate version
is to be calculated
:returns: None if the child should be omitted from the backport,
otherwise, the version to which the child should be
backported
"""
target_version = utils.convert_version_to_tuple(target_version)
for index, versions in enumerate(self.obj_relationships[child]):
my_version, child_version = versions
my_version = utils.convert_version_to_tuple(my_version)
if target_version < my_version:
if index == 0:
# We're backporting to a version from before this
# subobject was added: delete it from the primitive.
return None
else:
# We're in the gap between index-1 and index, so
# backport to the older version
return self.obj_relationships[child][index - 1][1]
elif target_version == my_version:
# This is the first mapping that satisfies the
# target_version request: backport the object.
return child_version
# No need to backport, as far as we know, so return the latest
# version of the sub-object we know about
return self.obj_relationships[child][-1][1]
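    # Worked example (hypothetical, matching the obj_relationships sample in
    # the class-level comment above): with
    #
    #     obj_relationships = {'subobject1': [('1.2', '1.1'), ('1.4', '1.2')]}
    #
    # a request to backport to '1.3' returns '1.1', to '1.2' returns '1.1',
    # to '1.5' returns '1.2' (the newest known mapping), and to '1.1' returns
    # None, meaning the child is dropped from the primitive.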
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
def _do_backport(to_version):
obj = getattr(self, field)
if obj is None:
return
if isinstance(obj, NovaObject):
if to_version != primitive[field]['nova_object.version']:
obj.obj_make_compatible(
primitive[field]['nova_object.data'],
to_version)
primitive[field]['nova_object.version'] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible(
primitive[field][i]['nova_object.data'],
to_version)
primitive[field][i]['nova_object.version'] = to_version
child_version = self.obj_calculate_child_version(target_version, field)
if child_version is None:
del primitive[field]
else:
_do_backport(child_version)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object
:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
if key not in self.obj_relationships:
# NOTE(danms): This is really a coding error and shouldn't
# happen unless we miss something
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % key)
self._obj_make_obj_compatible(primitive, target_version, key)
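    # A typical subclass override (illustrative only; the class and field
    # names are assumptions) strips fields newer than the target version
    # after delegating to this base implementation:
    #
    #     def obj_make_compatible(self, primitive, target_version):
    #         super(Widget, self).obj_make_compatible(primitive,
    #                                                 target_version)
    #         target_version = utils.convert_version_to_tuple(target_version)
    #         if target_version < (1, 2) and 'new_field' in primitive:
    #             del primitive['new_field']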
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), NovaObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
NOTE: This is NOT "revert to previous values"
NOTE: Specifying fields on recursive resets will only be
honored at the top level. Everything below the top
will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
# NOTE(danms): This is nova-specific, so don't copy this to o.vo
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
def keys(self):
return list(self)
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(ovoo_base.ObjectListBase):
# NOTE(danms): These are for transition to using the oslo
# base object and can be removed when we move to it.
@classmethod
def _obj_primitive_key(cls, field):
return 'nova_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
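    # For example, a primitive claiming version '1.6.1' is retried as '1.6'
    # before falling back to asking the conductor to backport the object.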
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in six.iteritems(kwargs):
if k == 'exc_val' and v:
kwargs[k] = str(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = timeutils.strtime(at=v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import threading
import fasteners
from taskflow import exceptions as exc
from taskflow import flow
from taskflow import logging
from taskflow import task
from taskflow.types import graph as gr
from taskflow.types import tree as tr
from taskflow.utils import iter_utils
from taskflow.utils import misc
LOG = logging.getLogger(__name__)
_RETRY_EDGE_DATA = {
flow.LINK_RETRY: True,
}
_EDGE_INVARIANTS = (flow.LINK_INVARIANT, flow.LINK_MANUAL, flow.LINK_RETRY)
_EDGE_REASONS = flow.LINK_REASONS
class Compilation(object):
"""The result of a compilers compile() is this *immutable* object."""
def __init__(self, execution_graph, hierarchy):
self._execution_graph = execution_graph
self._hierarchy = hierarchy
@property
def execution_graph(self):
"""The execution ordering of atoms (as a graph structure)."""
return self._execution_graph
@property
def hierarchy(self):
"""The hierachy of patterns (as a tree structure)."""
return self._hierarchy
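# Illustrative usage (hedged sketch; 'my_flow' is an assumption, but
# PatternCompiler below is the intended entry point):
#
#     compilation = PatternCompiler(my_flow).compile()
#     ordering = compilation.execution_graph
#     nesting = compilation.hierarchy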
def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
"""Adds/updates edges from nodes to other nodes in the specified graph.
It will connect the 'nodes_from' to the 'nodes_to' if an edge currently
does *not* exist (if it does already exist then the edges attributes
are just updated instead). When an edge is created the provided edge
attributes dictionary will be applied to the new edge between these two
nodes.
"""
# NOTE(harlowja): give each edge its own attr copy so that if it's
    # later modified, the attributes of other edges aren't modified too...
for u in nodes_from:
for v in nodes_to:
if not graph.has_edge(u, v):
if attr_dict:
graph.add_edge(u, v, attr_dict=attr_dict.copy())
else:
graph.add_edge(u, v)
else:
# Just update the attr_dict (if any).
if attr_dict:
graph.add_edge(u, v, attr_dict=attr_dict.copy())
class Linker(object):
"""Compiler helper that adds pattern(s) constraints onto a graph."""
@staticmethod
def _is_not_empty(graph):
# Returns true if the given graph is *not* empty...
return graph.number_of_nodes() > 0
@staticmethod
def _find_first_decomposed(node, priors,
decomposed_members, decomposed_filter):
# How this works; traverse backwards and find only the predecessor
# items that are actually connected to this entity, and avoid any
# linkage that is not directly connected. This is guaranteed to be
# valid since we always iter_links() over predecessors before
# successors in all currently known patterns; a queue is used here
# since it is possible for a node to have 2+ different predecessors so
# we must search back through all of them in a reverse BFS order...
#
# Returns the first decomposed graph of those nodes (including the
# passed in node) that passes the provided filter
# function (returns none if none match).
frontier = collections.deque([node])
        # NOTE(harlowja): None is in this initial set since the first prior in
# the priors list has None as its predecessor (which we don't want to
# look for a decomposed member of).
visited = set([None])
while frontier:
node = frontier.popleft()
if node in visited:
continue
node_graph = decomposed_members[node]
if decomposed_filter(node_graph):
return node_graph
visited.add(node)
# TODO(harlowja): optimize this more to avoid searching through
# things already searched...
for (u, v) in reversed(priors):
if node == v:
# Queue its predecessor to be searched in the future...
frontier.append(u)
else:
return None
def apply_constraints(self, graph, flow, decomposed_members):
# This list is used to track the links that have been previously
        # iterated over, so that when we are trying to find an entry to
        # connect to, we iterate backwards through this list, finding
        # connected nodes to the current target (let's call it v) and find
# the first (u_n, or u_n - 1, u_n - 2...) that was decomposed into
# a non-empty graph. We also retain all predecessors of v so that we
# can correctly locate u_n - 1 if u_n turns out to have decomposed into
# an empty graph (and so on).
priors = []
# NOTE(harlowja): u, v are flows/tasks (also graph terminology since
# we are compiling things down into a flattened graph), the meaning
# of this link iteration via iter_links() is that u -> v (with the
# provided dictionary attributes, if any).
for (u, v, attr_dict) in flow.iter_links():
if not priors:
priors.append((None, u))
v_g = decomposed_members[v]
if not v_g.number_of_nodes():
priors.append((u, v))
continue
invariant = any(attr_dict.get(k) for k in _EDGE_INVARIANTS)
if not invariant:
# This is a symbol *only* dependency, connect
# corresponding providers and consumers to allow the consumer
# to be executed immediately after the provider finishes (this
# is an optimization for these types of dependencies...)
u_g = decomposed_members[u]
if not u_g.number_of_nodes():
                    # This must always exist, but in case it somehow doesn't...
raise exc.CompilationFailure(
"Non-invariant link being created from '%s' ->"
" '%s' even though the target '%s' was found to be"
" decomposed into an empty graph" % (v, u, u))
for u in u_g.nodes_iter():
for v in v_g.nodes_iter():
# This is using the intersection() method vs the &
# operator since the latter doesn't work with frozen
# sets (when used in combination with ordered sets).
#
# If this is not done the following happens...
#
# TypeError: unsupported operand type(s)
# for &: 'frozenset' and 'OrderedSet'
depends_on = u.provides.intersection(v.requires)
if depends_on:
edge_attrs = {
_EDGE_REASONS: frozenset(depends_on),
}
_add_update_edges(graph,
[u], [v],
attr_dict=edge_attrs)
else:
# Connect nodes with no predecessors in v to nodes with no
# successors in the *first* non-empty predecessor of v (thus
# maintaining the edge dependency).
match = self._find_first_decomposed(u, priors,
decomposed_members,
self._is_not_empty)
if match is not None:
_add_update_edges(graph,
match.no_successors_iter(),
list(v_g.no_predecessors_iter()),
attr_dict=attr_dict)
priors.append((u, v))
class _TaskCompiler(object):
"""Non-recursive compiler of tasks."""
@staticmethod
def handles(obj):
return isinstance(obj, task.BaseTask)
def compile(self, task, parent=None):
graph = gr.DiGraph(name=task.name)
graph.add_node(task)
node = tr.Node(task)
if parent is not None:
parent.add(node)
return graph, node
class _FlowCompiler(object):
"""Recursive compiler of flows."""
@staticmethod
def handles(obj):
return isinstance(obj, flow.Flow)
def __init__(self, deep_compiler_func, linker):
self._deep_compiler_func = deep_compiler_func
self._linker = linker
def _connect_retry(self, retry, graph):
graph.add_node(retry)
# All nodes that have no predecessors should depend on this retry.
nodes_to = [n for n in graph.no_predecessors_iter() if n is not retry]
if nodes_to:
_add_update_edges(graph, [retry], nodes_to,
attr_dict=_RETRY_EDGE_DATA)
# Add association for each node of graph that has no existing retry.
for n in graph.nodes_iter():
if n is not retry and flow.LINK_RETRY not in graph.node[n]:
graph.node[n][flow.LINK_RETRY] = retry
@staticmethod
def _occurence_detector(to_graph, from_graph):
return iter_utils.count(node for node in from_graph.nodes_iter()
if node in to_graph)
def _decompose_flow(self, flow, parent=None):
"""Decomposes a flow into a graph, tree node + decomposed subgraphs."""
graph = gr.DiGraph(name=flow.name)
node = tr.Node(flow)
if parent is not None:
parent.add(node)
if flow.retry is not None:
node.add(tr.Node(flow.retry))
decomposed_members = {}
for item in flow:
subgraph, _subnode = self._deep_compiler_func(item, parent=node)
decomposed_members[item] = subgraph
if subgraph.number_of_nodes():
graph = gr.merge_graphs(
graph, subgraph,
# We can specialize this to be simpler than the default
# algorithm which creates overhead that we don't
# need for our purposes...
overlap_detector=self._occurence_detector)
return graph, node, decomposed_members
def compile(self, flow, parent=None):
graph, node, decomposed_members = self._decompose_flow(flow,
parent=parent)
self._linker.apply_constraints(graph, flow, decomposed_members)
if flow.retry is not None:
self._connect_retry(flow.retry, graph)
return graph, node
class PatternCompiler(object):
"""Compiles a flow pattern (or task) into a compilation unit.
Let's dive into the basic idea for how this works:
The compiler here is provided a 'root' object via its __init__ method;
this object could be a task or a flow (one of the supported patterns).
The end-goal is to produce a :py:class:`.Compilation` object as the result
with the needed components. If this is not possible a
:py:class:`~.taskflow.exceptions.CompilationFailure` will be raised.
In the case where an **unknown** type is being requested to compile
a ``TypeError`` will be raised and when a duplicate object (one that
has **already** been compiled) is encountered a ``ValueError`` is raised.
The complexity of this comes into play when the 'root' is a flow that
itself contains other nested flows (and so on); to compile this object and
its contained objects into a graph that *preserves* the constraints the
pattern mandates we have to go through a recursive algorithm that creates
subgraphs for each nesting level, and then on the way back up through
the recursion (now with a decomposed mapping from contained patterns or
atoms to their corresponding subgraphs) we then have to connect the
subgraphs (and the atom(s) therein) that were decomposed for a pattern
correctly into a new graph (using a :py:class:`.Linker` object to ensure
the pattern mandated constraints are retained) and then return to the
caller (which will do the same thing, up until the root node; by that point
a single graph has been created containing all atoms in the ordering
mandated by the pattern/nested patterns).
Also maintained in the :py:class:`.Compilation` object is a hierarchy of
the nesting of items (which is also built up during the above-mentioned
recursion, via a much simpler algorithm); this is typically used later to
determine the prior atoms of a given atom when looking up values that can
be provided to that atom for execution (see the scopes.py file for how this
works). Note that although you *could* think that the graph itself could be
used for this, and in some ways it can (for limited usage), the hierarchy
retains the nested structure (which is useful for scoping analysis/lookup)
and is able to provide back an iterator that gives the scopes visible
at each level (the graph does not have this information once flattened).
Let's take an example:
Given the pattern ``f(a(b, c), d)`` where ``f`` is a
:py:class:`~taskflow.patterns.linear_flow.Flow` with items ``a(b, c)``
where ``a`` is a :py:class:`~taskflow.patterns.linear_flow.Flow` composed
of tasks ``(b, c)`` and task ``d``.
The algorithm that will be performed (mirroring the above described logic)
will go through the following steps (the tree hierarchy building is left
out as that is more obvious)::
Compiling f
- Decomposing flow f with no parent (must be the root)
- Compiling a
- Decomposing flow a with parent f
- Compiling b
- Decomposing task b with parent a
- Decomposed b into:
Name: b
Nodes: 1
- b
Edges: 0
- Compiling c
- Decomposing task c with parent a
- Decomposed c into:
Name: c
Nodes: 1
- c
Edges: 0
- Relinking decomposed b -> decomposed c
- Decomposed a into:
Name: a
Nodes: 2
- b
- c
Edges: 1
b -> c ({'invariant': True})
- Compiling d
- Decomposing task d with parent f
- Decomposed d into:
Name: d
Nodes: 1
- d
Edges: 0
- Relinking decomposed a -> decomposed d
- Decomposed f into:
Name: f
Nodes: 3
- c
- b
- d
Edges: 2
c -> d ({'invariant': True})
b -> c ({'invariant': True})
"""
def __init__(self, root, freeze=True):
self._root = root
self._history = set()
self._linker = Linker()
self._freeze = freeze
self._lock = threading.Lock()
self._compilation = None
self._matchers = [
_FlowCompiler(self._compile, self._linker),
_TaskCompiler(),
]
self._level = 0
def _compile(self, item, parent=None):
"""Compiles a item (pattern, task) into a graph + tree node."""
for m in self._matchers:
if m.handles(item):
self._pre_item_compile(item)
graph, node = m.compile(item, parent=parent)
self._post_item_compile(item, graph, node)
return graph, node
else:
raise TypeError("Unknown object '%s' (%s) requested to compile"
% (item, type(item)))
def _pre_item_compile(self, item):
"""Called before a item is compiled; any pre-compilation actions."""
if item in self._history:
raise ValueError("Already compiled item '%s' (%s), duplicate"
" and/or recursive compiling is not"
" supported" % (item, type(item)))
self._history.add(item)
if LOG.isEnabledFor(logging.BLATHER):
LOG.blather("%sCompiling '%s'", " " * self._level, item)
self._level += 1
def _post_item_compile(self, item, graph, node):
"""Called after a item is compiled; doing post-compilation actions."""
self._level -= 1
if LOG.isEnabledFor(logging.BLATHER):
prefix = ' ' * self._level
LOG.blather("%sDecomposed '%s' into:", prefix, item)
prefix = ' ' * (self._level + 1)
LOG.blather("%sGraph:", prefix)
for line in graph.pformat().splitlines():
LOG.blather("%s %s", prefix, line)
LOG.blather("%sHierarchy:", prefix)
for line in node.pformat().splitlines():
LOG.blather("%s %s", prefix, line)
def _pre_compile(self):
"""Called before the compilation of the root starts."""
self._history.clear()
self._level = 0
def _post_compile(self, graph, node):
"""Called after the compilation of the root finishes successfully."""
dup_names = misc.get_duplicate_keys(graph.nodes_iter(),
key=lambda node: node.name)
if dup_names:
raise exc.Duplicate(
"Atoms with duplicate names found: %s" % (sorted(dup_names)))
if graph.number_of_nodes() == 0:
raise exc.Empty("Root container '%s' (%s) is empty"
% (self._root, type(self._root)))
self._history.clear()
@fasteners.locked
def compile(self):
"""Compiles the contained item into a compiled equivalent."""
if self._compilation is None:
self._pre_compile()
graph, node = self._compile(self._root, parent=None)
self._post_compile(graph, node)
if self._freeze:
graph.freeze()
node.freeze()
self._compilation = Compilation(graph, node)
return self._compilation
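# Usage sketch (illustrative and hedged): it assumes this module lives inside
# taskflow, so the linear_flow pattern and Task base class import as usual.
def _example_compile():
    from taskflow.patterns import linear_flow
    from taskflow import task as task_lib

    class Noop(task_lib.Task):
        def execute(self):
            pass

    inner = linear_flow.Flow('a').add(Noop(name='b'), Noop(name='c'))
    root = linear_flow.Flow('f').add(inner, Noop(name='d'))
    compilation = PatternCompiler(root).compile()
    # The graph holds atoms b, c, d with the linear ordering b -> c -> d;
    # the hierarchy node preserves the f -> (a -> (b, c), d) nesting.
    return compilation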
|
|
#!/usr/bin/python
# Raspberry Pi and Sense Hat Stock Symbol Price Ticker
# A simple monitor of today's positive or negative price change for a stock symbol, using the Sense Hat
# Shows a red/green solid color 8x8 matrix up/down arrow and stock price ticker
import atexit
import datetime
import decimal
import errno
import os
import signal
import sys
import time
import urllib2
import ystockquote
from socket import error as SocketError
# from decimal import *
from sense_hat import SenseHat
sense = SenseHat()
# Rotation (Default=0)
sense.set_rotation(90)
debug = 0
# Stock quote configuration:
tickerSymbol = 'AAPL'
# Functions
# Turn off all the LEDs
def lightsOut():
if debug == 1:
print('Turning off all LEDs.')
sense.clear()
# Console message and LEDs off while market closed
def marketClosed():
print "Stock Market Closed.", now
lightsOut()
time.sleep(60)
# Turn the LEDs off at program exit
atexit.register(lightsOut)
# Debugging
if debug == 1:
allInfo = ystockquote.get_all(tickerSymbol)
print allInfo
# quote = ystockquote.get_change(tickerSymbol)
# print quote
# print tickerSymbol + " Price = " + allInfo["price"]
# print tickerSymbol + " Change = " + allInfo["change"]
# print allInfo["change"]
# print allInfo
decimal.getcontext().prec = 8
# Handle timeouts gracefully
def timeoutHandler(signum, frame):
print "Error: Timeout fetching quote."
lightsOut()
time.sleep(10)
signal.alarm(0)
return
signal.signal(signal.SIGALRM, timeoutHandler)
# Main function to get the quote and animate the LEDs
def getQuote(change):
if debug == 1:
print "Function getQuote"
try:
signal.alarm(10)
change = ystockquote.get_change(tickerSymbol)
except urllib2.HTTPError as err:
if err.code == 404:
print "Error: 404 Not Found."
else:
print "Error: ", err.code
except urllib2.URLError as err:
print "Error: Connection reset by peer."
except SocketError as err:
if err.errno == errno.ECONNRESET:
print "Error: Connection reset by peer."
# price = ystockquote.get_price(tickerSymbol)
while change == 'N/A':
print "Error: No price change data."
time.sleep(60)
change = ystockquote.get_change(tickerSymbol)
changedecimal = decimal.Decimal(change)
# pricedecimal = Decimal(price)
# changedecimal = 0
# Reset timeout
signal.alarm(0)
# Console message with price change
print "$ Change: ", changedecimal
# print pricedecimal
# lastclose = Decimal(pricedecimal) - Decimal(changedecimal)
# print lastclose
# Negative
if changedecimal < 0:
LED_BRIGHTNESS = abs(int(round(100 * changedecimal)))
if LED_BRIGHTNESS > 255:
LED_BRIGHTNESS = 255
if LED_BRIGHTNESS < 50:
LED_BRIGHTNESS = 50
if debug == 1:
print "Brightness: ", LED_BRIGHTNESS
R = [LED_BRIGHTNESS, 0, 0] # Red
G = [0, LED_BRIGHTNESS, 0] # Green
B = [0, 0, 0] # Black
down_arrow = [
R, R, R, R, R, R, R, R,
R, R, R, R, R, R, R, R,
B, R, R, R, R, R, R, B,
B, R, R, R, R, R, R, B,
B, B, R, R, R, R, B, B,
B, B, R, R, R, R, B, B,
B, B, B, R, R, B, B, B,
B, B, B, R, R, B, B, B
]
sense.set_pixels(down_arrow)
time.sleep(1.5)
sense.show_message(str(changedecimal), text_colour=[LED_BRIGHTNESS, 0, 0])
# Positive
if changedecimal > 0:
LED_BRIGHTNESS = int(round(100 * changedecimal))
if LED_BRIGHTNESS > 255:
LED_BRIGHTNESS = 255
if LED_BRIGHTNESS < 50:
LED_BRIGHTNESS = 50
if debug == 1:
print "Brightness: ", LED_BRIGHTNESS
R = [LED_BRIGHTNESS, 0, 0] # Red
G = [0, LED_BRIGHTNESS, 0] # Green
B = [0, 0, 0] # Black
up_arrow = [
B, B, B, G, G, B, B, B,
B, B, B, G, G, B, B, B,
B, B, G, G, G, G, B, B,
B, B, G, G, G, G, B, B,
B, G, G, G, G, G, G, B,
B, G, G, G, G, G, G, B,
G, G, G, G, G, G, G, G,
G, G, G, G, G, G, G, G
]
sense.set_pixels(up_arrow)
time.sleep(1.5)
sense.show_message(str(changedecimal), text_colour=[0, LED_BRIGHTNESS, 0])
# Zero
if changedecimal == 0:
if debug == 1:
print "Zero Change!"
# sense.clear([50, 50, 50])
sense.show_message(str(changedecimal), text_colour=[50, 50, 50])
time.sleep(.5)
# Main program logic follows:
if __name__ == '__main__':
try:
while True:
now = datetime.datetime.now()
# print now
# Monday - Friday only (weekday() returns 0 for Monday through 4 for Friday)
if 0 <= now.weekday() <= 4:
if debug == 1:
print "Weekday: ", now.weekday()
# print now.hour
# Between 9AM - 5PM
if now.hour == 9:
if debug == 1:
print "Hour 9AM"
if now.minute >= 30:
if debug == 1:
print "Minute 30 or later"
if 9 <= now.hour <= 17:
# print "Hour: ", now.hour
# print now.time()
if debug == 1:
print "Calling function getQuote"
getQuote(0)
else:
marketClosed()
else:
if 9 <= now.hour <= 15:
if debug == 1:
print "Hour between 10AM-4PM"
# print "Hour: ", now.hour
# print now.time()
if debug == 1:
print "Calling function getQuote"
getQuote(0)
else:
marketClosed()
else:
marketClosed()
# Handle Ctrl-C gracefully
except KeyboardInterrupt:
lightsOut()
print ' - Exiting'
try:
sys.exit(0)
except SystemExit:
os._exit(0)
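# Optional refactor sketch (not wired into the script above): the brightness
# scaling repeated in getQuote() for the up and down branches, expressed as a
# single helper that maps the absolute dollar change onto an LED level.
def scale_brightness(change, low=50, high=255, factor=100):
    level = abs(int(round(factor * change)))
    return max(low, min(high, level))
# e.g. scale_brightness(-0.25) == 50, scale_brightness(1.2) == 120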
|
|
import random
import itertools
import numpy as np
import tensorflow as tf
from read_data import DataSet
from tensorflow.contrib.rnn import BasicLSTMCell
from tensorflow.python.ops.rnn import dynamic_rnn
from mytensorflow import get_initializer
from rnn import get_last_relevant_rnn_output, get_sequence_length
from nn import multi_conv1d, highway_network
def get_multi_gpu_models(config):
models = []
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)):
if gpu_idx > 0:
tf.get_variable_scope().reuse_variables()
model = Model(config, scope, rep=gpu_idx == 0)
models.append(model)
return models
class Model(object):
def __init__(self, config, scope, rep=True):
self.scope = scope
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, JX, VW, VC, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, None], name='x')
self.cx = tf.placeholder('int32', [N, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, None], name='x_mask')
self.y = tf.placeholder('int32', [N, None], name='y')
self.cy = tf.placeholder('int32', [N, None, W], name='cy')
self.y_mask = tf.placeholder('bool', [N, None], name='y_mask')
self.z = tf.placeholder('float32', [N, 3], name='z')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
# Define misc
self.tensor_dict = {}
self.h_dim = config.hidden_size
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
self.na_prob = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.var_ema = None
if rep:
self._build_var_ema()
if config.mode == 'train':
self._build_ema()
self.summary = tf.summary.merge_all()
self.summary = tf.summary.merge(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, JX, VW, VC, d, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, \
config.hidden_size, config.max_word_size
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
# Getting word vector
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, JX, W, dc]
Acy = tf.nn.embedding_lookup(char_emb_mat, self.cy) # [N, JX, W, dc]
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
yy = multi_conv1d(Acy, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
yy = multi_conv1d(Acy, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="yy")
xx = tf.reshape(xx, [-1, JX, dco])
yy = tf.reshape(yy, [-1, JX, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, JX, d]
Ay = tf.nn.embedding_lookup(word_emb_mat, self.y) # [N, JX, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['y'] = Ay
if config.use_char_emb:
xx = tf.concat(axis=2, values=[xx, Ax])  # [N, JX, dco + dw]
yy = tf.concat(axis=2, values=[yy, Ay])  # [N, JX, dco + dw]
else:
xx = Ax
yy = Ay
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
yy = highway_network(yy, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['yy'] = yy
with tf.variable_scope("encode_x"):
self.fwd_lstm = BasicLSTMCell(self.h_dim, state_is_tuple=True)
self.x_output, self.x_state = dynamic_rnn(cell=self.fwd_lstm, inputs=xx, dtype=tf.float32)
# self.x_output, self.x_state = bidirectional_dynamic_rnn(cell_fw=self.fwd_lstm,cell_bw=self.bwd_lstm,inputs=self.x_emb,dtype=tf.float32)
# print(self.x_output)
with tf.variable_scope("encode_y"):
self.fwd_lstm = BasicLSTMCell(self.h_dim, state_is_tuple=True)
self.y_output, self.y_state = dynamic_rnn(cell=self.fwd_lstm, inputs=yy,
initial_state=self.x_state, dtype=tf.float32)
# print self.y_output
# print self.y_state
length = get_sequence_length(self.y_output)
self.Y = get_last_relevant_rnn_output(self.y_output, length)
self.hstar = self.Y
self.W_pred = tf.get_variable("W_pred", shape=[self.h_dim, 3])
self.logits = tf.matmul(self.hstar, self.W_pred)
print("logits:", self.logits)
def _build_loss(self):
config = self.config
JX = tf.shape(self.x)[1]
# self.z: [N, 3]
losses = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.z))
tf.add_to_collection('losses', losses)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope)
ema_op = ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
print('opname:', ema_var.op.name)
print('var:', ema_var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, JX, VW, VC, d, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
feed_dict = {}
if config.len_opt:
"""
Note that this optimization results in variable GPU RAM usage (i.e. it can cause OOM in the middle of training).
First run without len_opt to confirm there is no OOM, then enable len_opt.
"""
if sum(len(sent) for sent in batch.data['x_list']) == 0:
new_JX = 1
else:
new_JX = max(len(sent) for sent in batch.data['x_list'])
if sum(len(ques) for ques in batch.data['y_list']) == 0:
new_JY = 1
else:
new_JY = max(len(ques) for ques in batch.data['y_list'])
JX = min(JX, max(new_JX, new_JY))
x = np.zeros([N, JX], dtype='int32')
cx = np.zeros([N, JX, W], dtype='int32')
x_mask = np.zeros([N, JX], dtype='bool')
y = np.zeros([N, JX], dtype='int32')
cy = np.zeros([N, JX, W], dtype='int32')
y_mask = np.zeros([N, JX], dtype='bool')
z = np.zeros([N, 3], dtype='float32')
feed_dict[self.x] = x
feed_dict[self.x_mask] = x_mask
feed_dict[self.cx] = cx
feed_dict[self.y] = y
feed_dict[self.cy] = cy
feed_dict[self.y_mask] = y_mask
feed_dict[self.z] = z
feed_dict[self.is_train] = is_train
if config.use_glove_for_unk:
feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
X = batch.data['x_list']
CX = batch.data['cx_list']
Z = batch.data['z_list']
for i, zi in enumerate(Z):
z[i] = zi
def _get_word(word):
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
if config.use_glove_for_unk:
d2 = batch.shared['new_word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d2:
return d2[each] + len(d)
return 1
def _get_char(char):
d = batch.shared['char2idx']
if char in d:
return d[char]
return 1
# replace char data to index.
for i, xi in enumerate(X):
for j, xij in enumerate(xi):
if j == config.max_sent_size:
break
each = _get_word(xij)
assert isinstance(each, int), each
x[i, j] = each
x_mask[i, j] = True
for i, cxi in enumerate(CX):
for j, cxij in enumerate(cxi):
if j == config.max_sent_size:
break
for k, cxijk in enumerate(cxij):
if k == config.max_word_size:
break
cx[i, j, k] = _get_char(cxijk)
for i, qi in enumerate(batch.data['y_list']):
for j, qij in enumerate(qi):
if j == config.max_sent_size:
break
y[i, j] = _get_word(qij)
y_mask[i, j] = True
for i, cqi in enumerate(batch.data['cy_list']):
for j, cqij in enumerate(cqi):
if j == config.max_sent_size:
break
for k, cqijk in enumerate(cqij):
if k == config.max_word_size:
break
cy[i, j, k] = _get_char(cqijk)
if k + 1 == config.max_word_size:
break
return feed_dict
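# Standalone sketch (hedged) of the len_opt trimming documented in
# get_feed_dict() above: the padded sentence dimension is shrunk to the longest
# sequence actually present in the batch, capped at config.max_sent_size, which
# keeps the feed arrays small at the cost of batch-to-batch memory variation.
def compute_batch_max_len(x_list, y_list, max_sent_size):
    new_jx = max((len(sent) for sent in x_list), default=0)
    new_jy = max((len(sent) for sent in y_list), default=0)
    return min(max_sent_size, max(new_jx, new_jy, 1))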
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
remove_end,
)
class NFLIE(InfoExtractor):
IE_NAME = 'nfl.com'
_VALID_URL = r'''(?x)
https?://
(?P<host>
(?:www\.)?
(?:
(?:
nfl|
buffalobills|
miamidolphins|
patriots|
newyorkjets|
baltimoreravens|
bengals|
clevelandbrowns|
steelers|
houstontexans|
colts|
jaguars|
titansonline|
denverbroncos|
kcchiefs|
raiders|
chargers|
dallascowboys|
giants|
philadelphiaeagles|
redskins|
chicagobears|
detroitlions|
packers|
vikings|
atlantafalcons|
panthers|
neworleanssaints|
buccaneers|
azcardinals|
stlouisrams|
49ers|
seahawks
)\.com|
.+?\.clubs\.nfl\.com
)
)/
(?:.+?/)*
(?P<id>[^/#?&]+)
'''
_TESTS = [{
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
'md5': '394ef771ddcd1354f665b471d78ec4c6',
'info_dict': {
'id': '0ap3000000398478',
'ext': 'mp4',
'title': 'Week 3: Redskins vs. Eagles highlights',
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
'upload_date': '20140921',
'timestamp': 1411337580,
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
'info_dict': {
'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
'ext': 'mp4',
'title': 'LIVE: Post Game vs. Browns',
'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
'upload_date': '20131229',
'timestamp': 1388354455,
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
'info_dict': {
'id': '0ap3000000467607',
'ext': 'mp4',
'title': 'Frustrations flare on the field',
'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
'timestamp': 1422850320,
'upload_date': '20150202',
},
}, {
'url': 'http://www.patriots.com/video/2015/09/18/10-days-gillette',
'md5': '4c319e2f625ffd0b481b4382c6fc124c',
'info_dict': {
'id': 'n-238346',
'ext': 'mp4',
'title': '10 Days at Gillette',
'description': 'md5:8cd9cd48fac16de596eadc0b24add951',
'timestamp': 1442618809,
'upload_date': '20150918',
},
}, {
'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
'only_matching': True,
}, {
'url': 'http://www.buffalobills.com/video/videos/Rex_Ryan_Show_World_Wide_Rex/b1dcfab2-3190-4bb1-bfc0-d6e603d6601a',
'only_matching': True,
}]
@staticmethod
def prepend_host(host, url):
if not url.startswith('http'):
if not url.startswith('/'):
url = '/%s' % url
url = 'http://{0:}{1:}'.format(host, url)
return url
@staticmethod
def format_from_stream(stream, protocol, host, path_prefix='',
preference=0, note=None):
url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
protocol=protocol,
host=host,
prefix=path_prefix,
path=stream.get('path'),
)
return {
'url': url,
'vbr': int_or_none(stream.get('rate', 0), 1000),
'preference': preference,
'format_note': note,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id, host = mobj.group('id'), mobj.group('host')
webpage = self._download_webpage(url, video_id)
config_url = NFLIE.prepend_host(host, self._search_regex(
r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1',
webpage, 'config URL', default='static/content/static/config/video/config.json',
group='config'))
# For articles, the id in the url is not the video id
video_id = self._search_regex(
r'(?:<nflcs:avplayer[^>]+data-contentId\s*=\s*|contentId\s*:\s*)(["\'])(?P<id>.+?)\1',
webpage, 'video id', default=video_id, group='id')
config = self._download_json(config_url, video_id, 'Downloading player config')
url_template = NFLIE.prepend_host(
host, '{contentURLTemplate:}'.format(**config))
video_data = self._download_json(
url_template.format(id=video_id), video_id)
formats = []
cdn_data = video_data.get('cdnData', {})
streams = cdn_data.get('bitrateInfo', [])
if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
protocol, host = parts.scheme, parts.netloc
for stream in streams:
formats.append(
NFLIE.format_from_stream(stream, protocol, host))
else:
cdns = config.get('cdns')
if not cdns:
raise ExtractorError('Failed to get CDN data', expected=True)
for name, cdn in cdns.items():
# LimeLight streams don't seem to work
if cdn.get('name') == 'LIMELIGHT':
continue
protocol = cdn.get('protocol')
host = remove_end(cdn.get('host', ''), '/')
if not (protocol and host):
continue
prefix = cdn.get('pathprefix', '')
if prefix and not prefix.endswith('/'):
prefix = '%s/' % prefix
preference = 0
if protocol == 'rtmp':
preference = -2
elif 'prog' in name.lower():
preference = 1
for stream in streams:
formats.append(
NFLIE.format_from_stream(stream, protocol, host,
prefix, preference, name))
self._sort_formats(formats)
thumbnail = None
for q in ('xl', 'l', 'm', 's', 'xs'):
thumbnail = video_data.get('imagePaths', {}).get(q)
if thumbnail:
break
return {
'id': video_id,
'title': video_data.get('headline'),
'formats': formats,
'description': video_data.get('caption'),
'duration': video_data.get('duration'),
'thumbnail': thumbnail,
'timestamp': int_or_none(video_data.get('posted'), 1000),
}
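# Behaviour sketch for the helper above (illustrative only): prepend_host()
# leaves absolute URLs untouched and roots relative paths on the given host.
def _prepend_host_examples():
    assert NFLIE.prepend_host('www.nfl.com', 'http://example.com/cfg.json') == 'http://example.com/cfg.json'
    assert NFLIE.prepend_host('www.nfl.com', 'static/config.json') == 'http://www.nfl.com/static/config.json'
    assert NFLIE.prepend_host('www.nfl.com', '/static/config.json') == 'http://www.nfl.com/static/config.json'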
|
|
from peewee import *
from .base import DatabaseTestCase
from .base import IS_CRDB
from .base import IS_CRDB_NESTED_TX
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import db
from .base import new_connection
from .base import skip_if
from .base import skip_unless
from .base_models import Register
class BaseTransactionTestCase(ModelTestCase):
requires = [Register]
def assertRegister(self, vals):
query = Register.select().order_by(Register.value)
self.assertEqual([register.value for register in query], vals)
def _save(self, *vals):
Register.insert([{Register.value: val} for val in vals]).execute()
def requires_nested(fn):
return skip_if(IS_CRDB and not IS_CRDB_NESTED_TX,
'nested transaction support is required')(fn)
class TestTransaction(BaseTransactionTestCase):
def test_simple(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertFalse(db.in_transaction())
self.assertRegister([1])
# Explicit rollback, implicit commit.
with db.atomic() as txn:
self._save(2)
txn.rollback()
self.assertTrue(db.in_transaction())
self._save(3)
self.assertFalse(db.in_transaction())
self.assertRegister([1, 3])
# Explicit rollbacks.
with db.atomic() as txn:
self._save(4)
txn.rollback()
self._save(5)
txn.rollback()
self.assertRegister([1, 3])
@requires_nested
def test_transactions(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertRegister([1])
with db.atomic() as txn:
self._save(2)
txn.rollback()
self._save(3)
with db.atomic() as sp1:
self._save(4)
with db.atomic() as sp2:
self._save(5)
sp2.rollback()
with db.atomic() as sp3:
self._save(6)
with db.atomic() as sp4:
self._save(7)
with db.atomic() as sp5:
self._save(8)
self.assertRegister([1, 3, 4, 6, 7, 8])
sp4.rollback()
self.assertRegister([1, 3, 4, 6])
self.assertRegister([1, 3, 4, 6])
def test_commit_rollback(self):
with db.atomic() as txn:
self._save(1)
txn.commit()
self._save(2)
txn.rollback()
self.assertRegister([1])
with db.atomic() as txn:
self._save(3)
txn.rollback()
self._save(4)
self.assertRegister([1, 4])
@requires_nested
def test_commit_rollback_nested(self):
with db.atomic() as txn:
self.test_commit_rollback()
txn.rollback()
self.assertRegister([])
with db.atomic():
self.test_commit_rollback()
self.assertRegister([1, 4])
def test_nesting_transaction_obj(self):
self.assertRegister([])
with db.transaction() as txn:
self._save(1)
with db.transaction() as txn2:
self._save(2)
txn2.rollback() # Actually issues a rollback.
self.assertRegister([])
self._save(3)
self.assertRegister([3])
with db.transaction() as txn:
self._save(4)
with db.transaction() as txn2:
with db.transaction() as txn3:
self._save(5)
txn3.commit() # Actually commits.
self._save(6)
txn2.rollback()
self.assertRegister([3, 4, 5])
@requires_nested
def test_savepoint_commit(self):
with db.atomic() as txn:
self._save(1)
txn.rollback()
self._save(2)
txn.commit()
with db.atomic() as sp:
self._save(3)
sp.rollback()
self._save(4)
sp.commit()
self.assertRegister([2, 4])
def test_atomic_decorator(self):
@db.atomic()
def save(i):
self._save(i)
save(1)
self.assertRegister([1])
def test_atomic_exception(self):
def will_fail():
with db.atomic():
self._save(1)
self._save(None)
self.assertRaises(IntegrityError, will_fail)
self.assertRegister([])
def user_error():
with db.atomic():
self._save(2)
raise ValueError
self.assertRaises(ValueError, user_error)
self.assertRegister([])
def test_manual_commit(self):
with db.manual_commit():
db.begin()
self._save(1)
db.rollback()
db.begin()
self._save(2)
db.commit()
with db.manual_commit():
db.begin()
self._save(3)
db.rollback()
db.begin()
self._save(4)
db.commit()
self.assertRegister([2, 4])
def test_mixing_manual_atomic(self):
@db.manual_commit()
def will_fail():
pass
@db.atomic()
def also_fails():
pass
with db.atomic():
self.assertRaises(ValueError, will_fail)
with db.manual_commit():
self.assertRaises(ValueError, also_fails)
with db.manual_commit():
with self.assertRaises(ValueError):
with db.atomic(): pass
with db.atomic():
with self.assertRaises(ValueError):
with db.manual_commit(): pass
def test_closing_db_in_transaction(self):
with db.atomic():
self.assertRaises(OperationalError, db.close)
@requires_nested
def test_db_context_manager(self):
db.close()
self.assertTrue(db.is_closed())
with db:
self.assertFalse(db.is_closed())
self._save(1)
with db:
self._save(2)
try:
with db:
self._save(3)
raise ValueError('xxx')
except ValueError:
pass
self._save(4)
try:
with db:
self._save(5)
with db:
self._save(6)
raise ValueError('yyy')
except ValueError:
pass
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
self.assertRegister([1, 2, 4])
@requires_nested
class TestSession(BaseTransactionTestCase):
def test_session(self):
self.assertTrue(db.session_start())
self.assertTrue(db.session_start())
self.assertEqual(db.transaction_depth(), 2)
self._save(1)
self.assertTrue(db.session_commit())
self.assertEqual(db.transaction_depth(), 1)
self._save(2) # Now we're in autocommit mode.
self.assertTrue(db.session_rollback())
self.assertEqual(db.transaction_depth(), 0)
self.assertTrue(db.session_start())
self._save(3)
self.assertTrue(db.session_rollback())
self.assertRegister([1])
def test_session_with_closed_db(self):
db.close()
self.assertTrue(db.session_start())
self.assertFalse(db.is_closed())
self.assertRaises(OperationalError, db.close)
self._save(1)
self.assertTrue(db.session_rollback())
self.assertRegister([])
def test_session_inside_context_manager(self):
with db.atomic():
self.assertTrue(db.session_start())
self._save(1)
self.assertTrue(db.session_commit())
self._save(2)
self.assertTrue(db.session_rollback())
db.session_start()
self._save(3)
self.assertRegister([1, 3])
def test_commit_rollback_mix(self):
db.session_start()
with db.atomic() as txn: # Will be a savepoint.
self._save(1)
with db.atomic() as t2:
self._save(2)
with db.atomic() as t3:
self._save(3)
t2.rollback()
txn.commit()
self._save(4)
txn.rollback()
self.assertTrue(db.session_commit())
self.assertRegister([1])
def test_session_rollback(self):
db.session_start()
self._save(1)
with db.atomic() as txn:
self._save(2)
with db.atomic() as t2:
self._save(3)
self.assertRegister([1, 2, 3])
self.assertTrue(db.session_rollback())
self.assertRegister([])
db.session_start()
self._save(1)
with db.transaction() as txn:
self._save(2)
with db.transaction() as t2:
self._save(3)
t2.rollback() # Rolls back everything, starts new txn.
db.session_commit()
self.assertRegister([])
def test_session_commit(self):
db.session_start()
self._save(1)
with db.transaction() as txn:
self._save(2)
with db.transaction() as t2:
self._save(3)
t2.commit() # Saves everything, starts new txn.
txn.rollback()
self.assertTrue(db.session_rollback())
self.assertRegister([1, 2, 3])
@skip_unless(IS_SQLITE, 'requires sqlite for transaction lock type')
class TestTransactionLockType(BaseTransactionTestCase):
def test_lock_type(self):
db2 = new_connection(timeout=0.001)
db2.connect()
with self.database.atomic(lock_type='EXCLUSIVE') as txn:
with self.assertRaises(OperationalError):
with db2.atomic(lock_type='IMMEDIATE') as t2:
self._save(1)
self._save(2)
self.assertRegister([2])
with self.database.atomic('IMMEDIATE') as txn:
with self.assertRaises(OperationalError):
with db2.atomic('EXCLUSIVE') as t2:
self._save(3)
self._save(4)
self.assertRegister([2, 4])
with self.database.transaction(lock_type='DEFERRED') as txn:
self._save(5) # Deferred -> Exclusive after our write.
with self.assertRaises(OperationalError):
with db2.transaction(lock_type='IMMEDIATE') as t2:
self._save(6)
self.assertRegister([2, 4, 5])
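# Quick reference sketch (not a test case; it mirrors what the cases above
# exercise): db.atomic() opens a real transaction at the outermost level and a
# savepoint when nested, so rolling back an inner block undoes only that
# block's writes.
def _atomic_nesting_example():
    with db.atomic():  # outermost -> transaction
        Register.create(value=100)
        with db.atomic() as sp:  # nested -> savepoint
            Register.create(value=200)
            sp.rollback()  # discards only value=200
    # After the outer block commits, only value=100 is persisted.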
|
|
import pytest
from graphene import Node
from saleor.checkout import calculations
from saleor.checkout.utils import add_variant_to_checkout
from saleor.payment import ChargeStatus, TransactionKind
from saleor.payment.models import Payment
from tests.api.utils import get_graphql_content
@pytest.fixture()
def checkout_with_variant(checkout, variant):
add_variant_to_checkout(checkout, variant, 1)
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_shipping_method(checkout_with_variant, shipping_method):
checkout = checkout_with_variant
checkout.shipping_method = shipping_method
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_billing_address(checkout_with_shipping_method, address):
checkout = checkout_with_shipping_method
checkout.billing_address = address
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_charged_payment(checkout_with_billing_address):
checkout = checkout_with_billing_address
taxed_total = calculations.checkout_total(checkout)
payment = Payment.objects.create(
gateway="Dummy", is_active=True, total=taxed_total.gross.amount, currency="USD"
)
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.checkout = checkout_with_billing_address
payment.save()
payment.transactions.create(
amount=payment.total,
kind=TransactionKind.CAPTURE,
gateway_response={},
is_success=True,
)
return checkout
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_create_checkout(api_client, graphql_address_data, variant, count_queries):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
price {
amount
currency
localized
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation createCheckout($checkoutInput: CheckoutCreateInput!) {
checkoutCreate(input: $checkoutInput) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutInput": {
"email": "[email protected]",
"shippingAddress": graphql_address_data,
"lines": [
{
"quantity": 1,
"variantId": Node.to_global_id("ProductVariant", variant.pk),
}
],
}
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_add_shipping_to_checkout(
api_client,
graphql_address_data,
variant,
checkout_with_variant,
shipping_method,
count_queries,
):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
price {
amount
currency
localized
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation updateCheckoutShippingOptions(
$checkoutId: ID!
$shippingMethodId: ID!
) {
checkoutShippingMethodUpdate(
checkoutId: $checkoutId
shippingMethodId: $shippingMethodId
) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_variant.pk),
"shippingMethodId": Node.to_global_id("ShippingMethod", shipping_method.pk),
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_add_billing_address_to_checkout(
api_client, graphql_address_data, checkout_with_shipping_method, count_queries
):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
price {
amount
currency
localized
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation updateCheckoutBillingAddress(
$checkoutId: ID!
$billingAddress: AddressInput!
) {
checkoutBillingAddressUpdate(
checkoutId: $checkoutId
billingAddress: $billingAddress
) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_shipping_method.pk),
"billingAddress": graphql_address_data,
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_checkout_payment_charge(
api_client, graphql_address_data, checkout_with_billing_address, count_queries
):
query = """
mutation createPayment($input: PaymentInput!, $checkoutId: ID!) {
checkoutPaymentCreate(input: $input, checkoutId: $checkoutId) {
errors {
field
message
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_billing_address.pk),
"input": {
"billingAddress": graphql_address_data,
"amount": 1000, # 10.00 USD * 100
"gateway": "Dummy",
"token": "charged",
},
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_complete_checkout(api_client, checkout_with_charged_payment, count_queries):
query = """
mutation completeCheckout($checkoutId: ID!, $redirectUrl: String) {
checkoutComplete(checkoutId: $checkoutId, redirectUrl: $redirectUrl) {
errors {
field
message
}
order {
id
token
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_charged_payment.pk),
"redirectUrl": "https://www.example.com",
}
get_graphql_content(api_client.post_graphql(query, variables))
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import sys
import fixtures
from nova.cmd import manage
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests.objects import test_network
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FixedIpCommands()
def test_reserve(self):
self.commands.reserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], True)
def test_reserve_nonexistent_address(self):
self.assertEqual(2, self.commands.reserve('55.55.55.55'))
def test_unreserve(self):
self.commands.unreserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], False)
def test_unreserve_nonexistent_address(self):
self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
def test_list(self):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list()
self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
def test_list_just_one_host(self):
def fake_fixed_ip_get_by_host(*args, **kwargs):
return [db_fakes.fixed_ip_fields]
self.useFixture(fixtures.MonkeyPatch(
'nova.db.fixed_ip_get_by_host',
fake_fixed_ip_get_by_host))
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO.StringIO()))
self.commands.list('banana')
self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
class FloatingIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FloatingIpCommands()
def test_address_to_hosts(self):
def assert_loop(result, expected):
for ip in result:
self.assertIn(str(ip), expected)
address_to_hosts = self.commands.address_to_hosts
# /32 and /31
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/32')
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/31')
# /30
expected = ["192.168.100.%s" % i for i in range(1, 3)]
result = address_to_hosts('192.168.100.0/30')
self.assertEqual(2, len(list(result)))
assert_loop(result, expected)
# /29
expected = ["192.168.100.%s" % i for i in range(1, 7)]
result = address_to_hosts('192.168.100.0/29')
self.assertEqual(6, len(list(result)))
assert_loop(result, expected)
# /28
expected = ["192.168.100.%s" % i for i in range(1, 15)]
result = address_to_hosts('192.168.100.0/28')
self.assertEqual(14, len(list(result)))
assert_loop(result, expected)
# /16
result = address_to_hosts('192.168.100.0/16')
self.assertEqual(65534, len(list(result)))
# NOTE(dripton): I don't test /13 because it makes the test take 3s.
# /12 gives over a million IPs, which is ridiculous.
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/12')
class NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = manage.NetworkCommands()
self.net = {'id': 0,
'label': 'fake',
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::/64',
'multi_host': False,
'gateway_v6': 'dead:beef::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '8.8.8.8',
'dns2': '8.8.4.4',
'vlan': 200,
'vlan_start': 201,
'vpn_public_address': '10.0.0.2',
'vpn_public_port': '2222',
'vpn_private_address': '192.168.0.2',
'dhcp_start': '192.168.0.3',
'project_id': 'fake_project',
'host': 'fake_host',
'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
def fake_network_get_by_cidr(context, cidr):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_get_by_uuid(context, uuid):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(uuid, self.fake_net['uuid'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def test_create(self):
def fake_create_networks(obj, context, **kwargs):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(kwargs['label'], 'Test')
self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
self.assertEqual(kwargs['multi_host'], False)
self.assertEqual(kwargs['num_networks'], 1)
self.assertEqual(kwargs['network_size'], 256)
self.assertEqual(kwargs['vlan'], 200)
self.assertEqual(kwargs['vlan_start'], 201)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
self.assertEqual(kwargs['dns1'], '8.8.8.8')
self.assertEqual(kwargs['dns2'], '8.8.4.4')
self.flags(network_manager='nova.network.manager.VlanManager')
from nova.network import manager as net_manager
self.stubs.Set(net_manager.VlanManager, 'create_networks',
fake_create_networks)
self.commands.create(
label='Test',
cidr='10.2.0.0/24',
num_networks=1,
network_size=256,
multi_host='F',
vlan=200,
vlan_start=201,
vpn_start=2000,
cidr_v6='fd00:2::/120',
gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
dns2='8.8.4.4',
uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
self.stubs.Set(db, 'network_get_all', fake_network_get_all)
output = StringIO.StringIO()
sys.stdout = output
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
_fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
"%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
"%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
head = _fmt % {'id': _('id'),
'cidr': _('IPv4'),
'cidr_v6': _('IPv6'),
'dhcp_start': _('start address'),
'dns1': _('DNS1'),
'dns2': _('DNS2'),
'vlan': _('VlanID'),
'project_id': _('project'),
'uuid': _("uuid")}
body = _fmt % {'id': self.net['id'],
'cidr': self.net['cidr'],
'cidr_v6': self.net['cidr_v6'],
'dhcp_start': self.net['dhcp_start'],
'dns1': self.net['dns1'],
'dns2': self.net['dns2'],
'vlan': self.net['vlan'],
'project_id': self.net['project_id'],
'uuid': self.net['uuid']}
answer = '%s\n%s\n' % (head, body)
self.assertEqual(result, answer)
def test_delete(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_uuid',
self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
self.stubs.Set(db, 'network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
def test_modify_associate(self):
self._test_modify_base(update_value={'project_id': 'test_project',
'host': 'test_host'},
project='test_project', host='test_host')
def test_modify_unchanged(self):
self._test_modify_base(update_value={}, project=None, host=None)
def test_modify_disassociate(self):
self._test_modify_base(update_value={'project_id': None, 'host': None},
project=None, host=None, dis_project=True,
dis_host=True)
class NeutronV2NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NeutronV2NetworkCommandsTestCase, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.commands = manage.NetworkCommands()
def test_create(self):
self.assertEqual(2, self.commands.create())
def test_list(self):
self.assertEqual(2, self.commands.list())
def test_delete(self):
self.assertEqual(2, self.commands.delete())
def test_modify(self):
self.assertEqual(2, self.commands.modify('192.168.0.1'))
class FlavorCommandsTestCase(test.TestCase):
def setUp(self):
super(FlavorCommandsTestCase, self).setUp()
values = dict(name="test.small",
memory_mb=220,
vcpus=1,
root_gb=16,
ephemeral_gb=32,
flavorid=105)
ref = db.flavor_create(context.get_admin_context(),
values)
self.instance_type_name = ref["name"]
self.instance_type_id = ref["id"]
self.instance_type_flavorid = ref["flavorid"]
self.set_key = manage.FlavorCommands().set_key
self.unset_key = manage.FlavorCommands().unset_key
def tearDown(self):
db.flavor_destroy(context.get_admin_context(),
"test.small")
super(FlavorCommandsTestCase, self).tearDown()
def _test_extra_specs_empty(self):
empty_specs = {}
actual_specs = db.flavor_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEqual(empty_specs, actual_specs)
def test_extra_specs_set_unset(self):
expected_specs = {'k1': 'v1'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.flavor_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEqual(expected_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self._test_extra_specs_empty()
def test_extra_specs_update(self):
expected_specs = {'k1': 'v1'}
updated_specs = {'k1': 'v2'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.flavor_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEqual(expected_specs, actual_specs)
self.set_key(self.instance_type_name, "k1", "v2")
actual_specs = db.flavor_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEqual(updated_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
def test_extra_specs_multiple(self):
two_items_extra_specs = {'k1': 'v1',
'k3': 'v3'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
self.set_key(self.instance_type_name, "k3", "v3")
actual_specs = db.flavor_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEqual(two_items_extra_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self.unset_key(self.instance_type_name, "k3")
class ProjectCommandsTestCase(test.TestCase):
def setUp(self):
super(ProjectCommandsTestCase, self).setUp()
self.commands = manage.ProjectCommands()
def test_quota(self):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
print_format = "%-36s %-10s" % ('instances', 'unlimited')
self.assertEqual((print_format in result), True)
def test_quota_update_invalid_key(self):
self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
class DBCommandsTestCase(test.TestCase):
def setUp(self):
super(DBCommandsTestCase, self).setUp()
self.commands = manage.DbCommands()
def test_archive_deleted_rows_negative(self):
self.assertEqual(1, self.commands.archive_deleted_rows(-1))
class ServiceCommandsTestCase(test.TestCase):
def setUp(self):
super(ServiceCommandsTestCase, self).setUp()
self.commands = manage.ServiceCommands()
def test_service_enable_invalid_params(self):
self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
def test_service_disable_invalid_params(self):
self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
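# Illustrative aside (uses the Python 3 stdlib ipaddress module, not nova's own
# implementation) showing the usable-host counts the address_to_hosts tests
# above assert on.
def _usable_host_counts():
    import ipaddress
    for cidr, expected in [('192.168.100.0/30', 2),
                           ('192.168.100.0/29', 6),
                           ('192.168.100.0/28', 14)]:
        assert len(list(ipaddress.ip_network(cidr).hosts())) == expected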
|
|
'''
Created on April 8, 2015
@author: ehenneken
'''
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import range
from flask import current_app, request
import sys
import time
import os
import urllib.request, urllib.parse, urllib.error
import itertools
#import simplejson as json
import json
import numpy as np
import cytoolz as cy
from math import sqrt
from collections import defaultdict
from operator import itemgetter
from datetime import date, datetime
from .models import get_identifiers
from .models import get_basic_stats_data
from .models import get_citations
from .models import get_citation_data
from .models import get_publication_data
from .models import get_usage_data
from .models import get_indicator_data
from .models import get_tori_data
from .models import get_citations_single
# Helper methods
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def get_norm_histo(l):
d = defaultdict(list)
for tag, num in l:
d[tag].append(num)
return {k: sum(v) for k, v in d.items()}
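# Small example of the helper above (illustrative): duplicate keys in the
# (tag, count) pairs are summed into a single histogram entry.
def _get_norm_histo_example():
    assert get_norm_histo([(2001, 1), (2002, 2), (2001, 3)]) == {2001: 4, 2002: 2}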
def merge_dictionaries(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
# Main engine: retrieves the desired statistics
def generate_metrics(**args):
result = {}
usage_data = None
citdata = None
citlists = None
selfcits = None
tdata = None
metrics_types = args.get('types', [])
# If we don't have any metrics type, return empty results
if len(metrics_types) == 0:
return result
tori = args.get('tori', True)
# First retrieve the data we need for our calculations
bibcodes, bibcodes_ref, identifiers, skipped = get_record_info(
bibcodes=args.get('bibcodes', []), query=args.get('query', None))
# if len(bibcodes) == 1 and len(metrics_types) == 0:
# metrics_types = ['basic', 'citations', 'histograms']
# If no identifiers were returned, return empty results
if len(identifiers) == 0:
return result
# Record the bibcodes that fell off the wagon
result['skipped bibcodes'] = skipped
# If there are skipped records, create a log message
if len(skipped) > 0:
current_app.logger.warning('Found %s skipped bibcodes in metrics request: %s'%(len(skipped),",".join(skipped)))
# Start calculating the required statistics and indicators
citdata = usage_data = citlists = selfcits = None
if 'basic' in metrics_types:
basic_stats, basic_stats_refereed, usage_data = \
get_basic_stats(identifiers)
result['basic stats'] = basic_stats
result['basic stats refereed'] = basic_stats_refereed
if 'citations' in metrics_types:
cite_stats, cite_stats_refereed, citdata, selfcits, citlists = \
get_citation_stats(identifiers, bibcodes, bibcodes_ref)
result['citation stats'] = cite_stats
result['citation stats refereed'] = cite_stats_refereed
if 'histograms' in metrics_types:
hists = {}
hist_types = args.get('histograms')
if 'publications' in hist_types and len(identifiers) > 1:
hists['publications'] = get_publication_histograms(identifiers)
if 'reads' in hist_types:
hists['reads'] = get_usage_histograms(identifiers, data=usage_data)
if 'downloads' in hist_types and len(identifiers) > 1:
hists['downloads'] = get_usage_histograms(
identifiers, usage_type='downloads', data=usage_data)
if 'citations' in hist_types:
hists['citations'] = get_citation_histograms(
identifiers, data=citlists)
result['histograms'] = hists
if 'indicators' in metrics_types:
indicators = {}
indic, indic_ref = get_indicators(
identifiers, data=citdata, usagedata=usage_data)
if tori:
tori, tori_ref, riq, riq_ref, tdata = get_tori(
identifiers, bibcodes, self_cits=selfcits)
indic['tori'] = tori
indic['riq'] = riq
indic_ref['tori'] = tori_ref
indic_ref['riq'] = riq_ref
else:
indic['tori'] = 'NA'
indic['riq'] = 'NA'
indic_ref['tori'] = 'NA'
indic_ref['riq'] = 'NA'
result['indicators'] = indic
result['indicators refereed'] = indic_ref
if 'timeseries' in metrics_types or 'time series' in metrics_types:
result['time series'] = get_time_series(
identifiers,
bibcodes,
data=citlists,
usagedata=usage_data,
tori_data=tdata,
include_tori=tori,
self_cits=selfcits)
# The next line takes care of mapping numpy float64 and int64 values to regular floats and integers
# (JSON serialization fails for numpy float64 and int64 classes)
res = json.loads(json.dumps(result, cls=MyEncoder))
return res
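# A minimal usage sketch (hypothetical bibcodes; assumes an active Flask app
# context, since generate_metrics logs through current_app):
#
#     metrics = generate_metrics(
#         bibcodes=['2019ApJ...100..200A', '2020MNRAS.500....1B'],
#         types=['basic', 'citations', 'histograms', 'indicators'],
#         histograms=['publications', 'reads', 'citations'],
#         tori=True)
#
# Only the metric blocks requested via 'types' (plus 'skipped bibcodes')
# appear in the returned dictionary.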
# Data retrieval methods
# A. Data retrieval before we do any sort of computation
# Get bibcodes, identifiers and establish bibcodes for which we have no data
# (which could be because the bibcode is invalid or because something went
# wrong creating the database record for that publication)
def get_record_info(**args):
# Did we get bibcodes?
if args.get('bibcodes', []):
IDmap = get_identifiers(args['bibcodes'])
IDs = [x[1] for x in IDmap]
bibs = [x[0] for x in IDmap]
bibs_ref = [x[0] for x in IDmap if x[2]]
missing = [b for b in args['bibcodes'] if b not in bibs]
return bibs, bibs_ref, IDs, missing
else:
return {"Error": "Unable to get results!",
"Error Info": "Unsupported metrics request",
"Status Code": 200}
# Get citations, self-citations
def get_selfcitations(identifiers, bibcodes):
data = get_citations(identifiers)
# record the actual self-citations so that we can use that
# information later on in the calculation of the Tori
try:
selfcits = [
(set(p.citations).intersection(set(bibcodes)), p.refereed)
for p in data]
except:
selfcits = [([], False)]
return data, selfcits, 0, 0, 0, 0
Nself = sum([len(c[0]) for c in selfcits])
Nself_refereed = sum([len(c[0]) * c[1] for c in selfcits])
Nciting = len(set(itertools.chain(*[p.citations for p in data])))
Nciting_ref = len(
set(itertools.chain(*[p.citations for p in data if p.refereed])))
return data, selfcits, Nself, Nself_refereed, Nciting, Nciting_ref
# B. Statistics functions
# The basic stats function gets the publication and usage stats
def get_basic_stats(identifiers):
# basic stats for all publications
bs = {}
    # basic stats for refereed publications
bsr = {}
# Get the data to calculate the basic stats
data = get_basic_stats_data(identifiers)
# First get the number of (refereed) papers
bs['number of papers'] = len(identifiers)
bsr['number of papers'] = len([p for p in data if p.refereed])
# Next get the (refereed) normalized paper count
bs['normalized paper count'] = np.sum(
np.array([1.0 / float(p.author_num) for p in data]), dtype=float)
bsr['normalized paper count'] = np.sum(
np.array([1.0 / float(p.author_num) for p in data if p.refereed]), dtype=float)
# Get the total number of reads
year = datetime.now().year
Nentries = year - 1996 + 1
reads = [p.reads for p in data if p.reads and len(p.reads) == Nentries]
reads_ref = [
p.reads for p in data if p.refereed and p.reads and
len(p.reads) == Nentries]
reads_totals = [sum(r) for r in reads]
reads_ref_totals = [sum(r) for r in reads_ref]
bs['total number of reads'] = np.sum(reads_totals or [0], dtype=int)
bsr['total number of reads'] = np.sum(reads_ref_totals or [0], dtype=int)
# Get the average number of reads
bs['average number of reads'] = np.mean(reads_totals or [0], dtype=int)
bsr['average number of reads'] = np.mean(reads_ref_totals or [0], dtype=int)
# Get the median number of reads
bs['median number of reads'] = np.median(reads_totals or [0])
bsr['median number of reads'] = np.median(reads_ref_totals or [0])
# Get the normalized number of reads
# bs['normalized number of reads'] = \
# np.sum([np.array(p.reads)/float(p.author_num)
# for p in data if p.reads and len(p.reads) == Nentries])
# bsr['normalized number of reads'] = \
# sum([p.reads[-1] for p in data if p.refereed and
# p.reads and len(p.reads) == Nentries])
# and finally, get the recent reads
bs['recent number of reads'] = sum(
[p.reads[-1] for p in data if p.reads and len(p.reads) == Nentries])
bsr['recent number of reads'] = sum(
[p.reads[-1] for p in data if p.refereed and p.reads and
len(p.reads) == Nentries])
# Do the same for the downloads
downloads = [
p.downloads for p in data if p.downloads and
len(p.downloads) == Nentries]
downloads_ref = [p.downloads for p in data if p.refereed and
p.downloads and len(p.downloads) == Nentries]
downloads_totals = [sum(d) for d in downloads]
downloads_ref_totals = [sum(d) for d in downloads_ref]
bs['total number of downloads'] = np.sum(downloads_totals or [0], dtype=int)
bsr['total number of downloads'] = np.sum(downloads_ref_totals or [0], dtype=int)
# Get the average number of downloads
bs['average number of downloads'] = np.mean(downloads_totals or [0], dtype=float)
bsr['average number of downloads'] = np.mean(downloads_ref_totals or [0], dtype=float)
# Get the median number of downloads
bs['median number of downloads'] = np.median(downloads_totals or [0])
bsr['median number of downloads'] = np.median(downloads_ref_totals or [0])
# Get the normalized number of downloads
# bs['normalized number of downloads'] = \
# np.sum([np.array(p.downloads)/float(p.author_num) for p in data if
# p.downloads and len(p.downloads) == Nentries])
# bsr['normalized number of downloads'] = \
# np.sum([np.array(p.downloads)/float(p.author_num) for p in data if
# p.refereed and p.downloads and len(p.downloads) == Nentries])
# and finally, get the recent number of downloads
bs['recent number of downloads'] = sum(
[p.downloads[-1] for p in data if p.downloads and
len(p.downloads) == Nentries])
bsr['recent number of downloads'] = sum(
[p.downloads[-1] for p in data if p.refereed and p.downloads and
len(p.downloads) == Nentries])
# Return both results and the data (which will get used later on
# if the usage histograms are required)
return bs, bsr, data
# The citation stats function gets statistics for citations
def get_citation_stats(identifiers, bibcodes, bibcodes_ref):
data = selfcits = citdata = None
# citation stats for all publications
cs = {}
# citation stats for refereed publications
csr = {}
# Get the data to compute the citation statistics
# First get data with just the numbers
data = get_citation_data(identifiers)
Nzero = len(bibcodes) - len(data)
Nzero_ref = len(bibcodes_ref) - \
len([p.citation_num for p in data if p.refereed])
citnums = [p.citation_num for p in data] + [0] * Nzero
ref_citnums = [p.refereed_citation_num for p in data] + [0] * Nzero
citnums_ref = [
p.citation_num for p in data if p.refereed] + [0] * Nzero_ref
ref_citnums_ref = [
p.refereed_citation_num for p in data if p.refereed] + [0] * Nzero_ref
# Next, get more detailed citation information
# (with data to be used later on)
# citdata : data structure with citation data for reuse later on
# selfcits : data structure with self-citations
# Nself : number of self-citations
# Nself_ref : number of self-citations for refereed publications
# Nciting : number of citing papers
# Nciting_ref: number of citing papers for refereed publications
citdata, selfcits, Nself, Nself_ref, Nciting, Nciting_ref = \
get_selfcitations(identifiers, bibcodes)
# The number of unique citing papers and the number of self-citations
cs['number of citing papers'] = Nciting
csr['number of citing papers'] = Nciting_ref
cs['number of self-citations'] = Nself
self_citations = list(itertools.chain(*[list(e[0]) for e in selfcits if len(e[0]) > 0]))
cs['self-citations'] = self_citations
csr['number of self-citations'] = Nself_ref
# The citation stats
# Total number of citations
cs['total number of citations'] = np.sum(
[p.citation_num for p in data] or [0], dtype=int)
csr['total number of citations'] = np.sum(
[p.citation_num for p in data if p.refereed] or [0], dtype=int)
# Average number of citations
cs['average number of citations'] = np.mean(citnums or [0], dtype=float)
csr['average number of citations'] = np.mean(citnums_ref or [0], dtype=float)
# Median number of citations
cs['median number of citations'] = np.median(citnums or [0])
csr['median number of citations'] = np.median(citnums_ref or [0])
# Normalized number of citations
cs['normalized number of citations'] = np.sum(
[float(p.citation_num) / float(p.author_num) for p in data] or [0], dtype=float)
csr['normalized number of citations'] = np.sum(
[float(p.citation_num) / float(p.author_num) for p in data if
p.refereed] or [0], dtype=float)
# The refereed citations stats
##
cs['total number of refereed citations'] = np.sum(
        [p.refereed_citation_num for p in data] or [0], dtype=int)
csr['total number of refereed citations'] = np.sum(
[p.refereed_citation_num for p in data if p.refereed] or [0], dtype=int)
cs['average number of refereed citations'] = np.mean(ref_citnums or [0], dtype=float)
csr['average number of refereed citations'] = np.mean(
ref_citnums_ref or [0], dtype=float)
cs['median number of refereed citations'] = np.median(ref_citnums or [0])
csr['median number of refereed citations'] = np.median(
ref_citnums_ref or [0])
cs['normalized number of refereed citations'] = np.sum(
[float(p.refereed_citation_num) / float(p.author_num) for
p in data] or [0], dtype=float)
csr['normalized number of refereed citations'] = np.sum(
[float(p.refereed_citation_num) / float(p.author_num) for
p in data if p.refereed] or [0], dtype=float)
# Send the results back
return cs, csr, data, selfcits, citdata
def get_publication_histograms(identifiers):
ph = {}
current_year = datetime.now().year
# Get necessary data
data = get_publication_data(identifiers)
# Get the publication histogram
years = [int(p.bibcode[:4]) for p in data]
nullhist = [(y, 0) for y in range(min(years), current_year + 1)]
yearhist = cy.frequencies(years)
ph['all publications'] = merge_dictionaries(dict(nullhist), yearhist)
years_ref = [int(p.bibcode[:4]) for p in data if p.refereed]
yearhist = cy.frequencies(years_ref)
ph['refereed publications'] = merge_dictionaries(dict(nullhist), yearhist)
# Get the normalized publication histogram
tmp = [(int(p.bibcode[:4]), 1.0 / float(p.author_num)) for p in data]
ph['all publications normalized'] = get_norm_histo(nullhist + tmp)
tmp = [(int(p.bibcode[:4]), 1.0 / float(p.author_num))
for p in data if p.refereed]
ph['refereed publications normalized'] = get_norm_histo(nullhist + tmp)
return ph
def get_usage_histograms(identifiers, usage_type='reads', data=None):
uh = {}
# Get necessary data if nothing was provided
if not data:
data = get_usage_data(identifiers)
# Determine the current year (so that we know how many entries to expect
# in usage lists)
year = datetime.now().year
Nentries = year - 1996 + 1
zeros = [[0] * Nentries]
if usage_type == 'reads':
# Get all reads data and sum up the individual lists
usage_data = [
p.reads for p in data if p.reads and len(p.reads) == Nentries]
usage = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# and also get the normalized reads
usage_data = [np.array(p.reads, dtype=int) / float(p.author_num)
for p in data if p.reads and len(p.reads) == Nentries]
usage_norm = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# Do the same for just the refereed publications
usage_data = [p.reads for p in data if p.refereed and p.reads and
len(p.reads) == Nentries]
usage_ref = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# and also get the normalized version
usage_data = [np.array(p.reads, dtype=int) / float(p.author_num)
for p in data if p.refereed and p.reads and
len(p.reads) == Nentries]
usage_ref_norm = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
else:
usage_type = 'downloads'
# Get all downloads data and sum up the individual lists
usage_data = [
p.downloads for p in data if p.downloads and
len(p.downloads) == Nentries]
usage = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# and also get the normalized version
usage_data = [np.array(p.downloads, dtype=int) / float(p.author_num)
for p in data if p.downloads and
len(p.downloads) == Nentries]
usage_norm = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# Do the same for just the refereed publications
usage_data = [p.downloads for p in data if p.refereed and
p.downloads and len(p.downloads) == Nentries]
usage_ref = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# and also get the normalized version
usage_data = [np.array(p.downloads, dtype=int) / float(p.author_num)
for p in data if p.refereed and p.downloads and
len(p.downloads) == Nentries]
usage_ref_norm = [sum(sublist)
for sublist in zip(*usage_data or zeros)]
# Construct the histograms (index 0 corresponds with year 1996)
uh['all %s' % usage_type] = dict(
[(1996 + i, v) for i, v in enumerate(usage)])
uh['all %s normalized' % usage_type] = dict(
[(1996 + i, v) for i, v in enumerate(usage_norm)])
uh['refereed %s' % usage_type] = dict(
[(1996 + i, v) for i, v in enumerate(usage_ref)])
uh['refereed %s normalized' % usage_type] = dict(
[(1996 + i, v) for i, v in enumerate(usage_ref_norm)])
return uh
def get_citation_histograms(identifiers, data=None):
ch = {}
current_year = datetime.now().year
# Get necessary data if nothing was provided
if not data:
data = get_citations(identifiers)
if len(data) == 0:
data = get_citations(identifiers, no_zero=False)
years = [int(p.bibcode[:4]) for p in data]
# First gather all necessary data
# refereed -> refereed
rr_data = [([int(c[:4]) for c in p.refereed_citations],
1.0 / float(p.author_num)) for p in data if p.refereed]
# refereed -> non-refereed
rn_data = [([int(c[:4]) for c in p.citations if c in p.refereed_citations],
1.0 / float(p.author_num)) for p in data if not p.refereed]
# non-refereed -> refereed
nr_data = [([int(c[:4]) for c in list(set(p.citations).difference(
set(p.refereed_citations)))], 1.0 / float(p.author_num)) for
p in data if p.refereed]
# non-refereed -> non-refereed
nn_data = [([int(c[:4]) for c in p.citations if
c not in p.refereed_citations],
1.0 / float(p.author_num)) for p in data if not p.refereed]
# First construct the regular histograms
max_year = current_year
rr_hist = cy.frequencies(list(itertools.chain(*[d[0] for d in rr_data])))
rn_hist = cy.frequencies(list(itertools.chain(*[d[0] for d in rn_data])))
nr_hist = cy.frequencies(list(itertools.chain(*[d[0] for d in nr_data])))
nn_hist = cy.frequencies(list(itertools.chain(*[d[0] for d in nn_data])))
try:
max_year = max(max_year, max(rr_hist.keys()))
max_year = max(max_year, max(rn_hist.keys()))
max_year = max(max_year, max(nr_hist.keys()))
max_year = max(max_year, max(nn_hist.keys()))
except:
pass
if max_year > current_year:
current_year = max_year
# Get the earliest citation
try:
min_year = min(
list(rr_hist.keys()) + list(rn_hist.keys()) + list(nr_hist.keys()) + list(nn_hist.keys()))
nullhist = [(y, 0) for y in range(min_year, current_year + 1)]
except:
nullhist = [(y, 0) for y in range(min(years), current_year + 1)]
if len(nullhist) == 0:
nullhist = [(min(years), 0)]
    # Now create the histograms with zeroes for years without values
ch['refereed to refereed'] = merge_dictionaries(dict(nullhist), rr_hist)
ch['refereed to nonrefereed'] = merge_dictionaries(dict(nullhist), rn_hist)
ch['nonrefereed to refereed'] = merge_dictionaries(dict(nullhist), nr_hist)
ch['nonrefereed to nonrefereed'] = merge_dictionaries(
dict(nullhist), nn_hist)
min_year = min(list(ch['refereed to refereed'].keys()) +
list(ch['refereed to nonrefereed'].keys()) +
list(ch['nonrefereed to refereed'].keys()) +
list(ch['nonrefereed to nonrefereed'].keys()))
nullhist = [(y, 0) for y in range(min_year, current_year + 1)]
# Normalized histograms need a different approach
tmp = list(itertools.chain(*[[(d, x[1]) for d in x[0]] for x in rr_data]))
ch['refereed to refereed normalized'] = get_norm_histo(nullhist + tmp)
tmp = list(itertools.chain(*[[(d, x[1]) for d in x[0]] for x in rn_data]))
ch['refereed to nonrefereed normalized'] = get_norm_histo(nullhist + tmp)
tmp = list(itertools.chain(*[[(d, x[1]) for d in x[0]] for x in nr_data]))
ch['nonrefereed to refereed normalized'] = get_norm_histo(nullhist + tmp)
tmp = list(itertools.chain(*[[(d, x[1]) for d in x[0]] for x in nn_data]))
ch['nonrefereed to nonrefereed normalized'] = get_norm_histo(
nullhist + tmp)
return ch
def get_indicators(identifiers, data=None, usagedata=None):
ind = {}
ind_ref = {}
# Get the necessary data if we did not get any
if not data:
data = get_indicator_data(identifiers)
if not usagedata:
usagedata = get_usage_data(identifiers)
# Organize the citations with a running index (the citation
# data is already ordered from most to least cited)
citations = [(i + 1, p.citation_num) for i, p in enumerate(data)]
# First the Hirsch index
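    # (h = the largest rank i such that the i-th most cited paper has at least i citations)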
ind['h'] = max([x[0] for x in citations if x[1] >= x[0]] or [0])
# Next the g index
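    # (g = the largest rank i such that the top i papers together have at least i**2 citations)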
ind['g'] = max([i for (c, i) in zip(list(np.cumsum([x[1] for
x in citations], axis=0)), [x[0] for x in citations]) if
i**2 <= c] or [0])
    # The number of papers with 10 or more citations (i10)
ind['i10'] = len([x for x in citations if x[1] >= 10])
    # The number of papers with 100 or more citations (i100)
ind['i100'] = len([x for x in citations if x[1] >= 100])
    # The m index is the h index divided by the range of publication years
yrange = datetime.now().year - \
min([int(p.bibcode[:4]) for p in usagedata]) + 1
# In the annoying case where all pubs are from next year, the above should be just 1
yrange = max(yrange, 1)
ind['m'] = float(ind['h']) / float(yrange)
# The read10 index is calculated from current reads for papers published
# in the last 10 years, normalized by number of authors
year = datetime.now().year
Nentries = year - 1996 + 1
ind['read10'] = sum([float(p.reads[-1]) / float(p.author_num)
for p in usagedata if
int(p.bibcode[:4]) > year - 10 and p.reads and
len(p.reads) == Nentries])
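    # Extrapolate the current (partial) year's reads to a full year by scaling
    # with (number of days in the year) / (number of days elapsed so far)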
d0 = date(datetime.now().year, 1, 1)
d1 = date(datetime.now().year, datetime.now().month, datetime.now().day)
d2 = date(datetime.now().year, 12, 31)
delta = (d1 - d0).days + 1
ndays = (d2 - d0).days + 1
try:
r10_corr = float(ndays)/float(delta)
except:
r10_corr = 1.0
ind['read10'] = ind['read10']*r10_corr
# Now all the values for the refereed publications
citations = [(i + 1, n) for i, n in enumerate([p.citation_num for p in
data if p.refereed])]
# First the Hirsch index
ind_ref['h'] = max([x[0] for x in citations if x[1] >= x[0]] or [0])
# Next the g index
ind_ref['g'] = max([i for (c, i) in zip(list(np.cumsum(
[x[1] for x in citations], axis=0)), [x[0] for x in citations]) if
i**2 <= c] or [0])
    # The number of papers with 10 or more citations (i10)
ind_ref['i10'] = len([x for x in citations if x[1] >= 10])
    # The number of papers with 100 or more citations (i100)
ind_ref['i100'] = len([x for x in citations if x[1] >= 100])
    # The m index is the h index divided by the range of publication years
yrange_ref = datetime.now().year - \
min([int(p.bibcode[:4]) for p in usagedata]) + 1
# In the annoying case where all pubs are from next year, the above should be just 1
yrange_ref = max(yrange_ref, 1)
ind_ref['m'] = float(ind_ref['h']) / float(yrange_ref)
# The read10 index is calculated from current reads for papers published
# in the last 10 years, normalized by number of authors
year = datetime.now().year
Nentries = year - 1996 + 1
ind_ref['read10'] = sum([float(p.reads[-1]) / float(p.author_num)
for p in usagedata if p.refereed and
int(p.bibcode[:4]) > year - 10 and
p.reads and len(p.reads) == Nentries])
ind_ref['read10'] = ind_ref['read10']*r10_corr
# Send results back
return ind, ind_ref
def get_tori(identifiers, bibcodes, self_cits=None):
# Get additional data necessary for Tori calculation
data = get_tori_data(identifiers)
if len(data) == 0:
return 0, 0, 0, 0, []
# If we did not get self-citations, retrieve them
if not self_cits:
self_cits = get_selfcitations(identifiers, bibcodes)[1]
self_citations = set((itertools.chain(*[x[0] for x in self_cits])))
# Now we can calculate the Tori index
tori_data = [p for p in list(itertools.chain(
*[p.rn_citation_data for p in data if p.rn_citation_data])) if
p['bibcode'] not in self_citations and 'pubyear' in p]
tori_data_ref = [p for p in list(itertools.chain(
*[p.rn_citation_data for p in data if p.refereed and
p.rn_citation_data])) if p['bibcode'] not in self_citations]
try:
tori = np.sum(
np.array([r['auth_norm'] * r['ref_norm'] for r in tori_data]), dtype=float)
tori_ref = np.sum(
np.array([r['auth_norm'] * r['ref_norm'] for r in tori_data_ref]), dtype=float)
except:
return 0, 0, 0, 0, tori_data
# The riq index follows from the Tori index and the year range
#yrange = datetime.now().year - min([int(b[:4]) for b in bibcodes]) + 1
yrange = max([int(b[:4]) for b in bibcodes]) - min([int(b[:4]) for b in bibcodes]) + 1
#yrange_ref = datetime.now().year - \
# min([int(p.bibcode[:4]) for p in data]) + 1
# In the annoying case where all pubs are from next year, the above should be just 1
yrange = max(yrange, 1)
riq = int(1000.0 * sqrt(float(tori)) / float(yrange))
riq_ref = int(1000.0 * sqrt(float(tori_ref)) / float(yrange))
# Send the results back
return tori, tori_ref, riq, riq_ref, tori_data
def get_time_series(identifiers, bibcodes, data=None, usagedata=None,
tori_data=None, include_tori=True, self_cits=None):
series = {}
i10 = {}
i100 = {}
h = {}
g = {}
r10 = {}
tori = {}
# Get data if nothing was supplied
if not data:
data = get_citations(identifiers)
if not usagedata:
usagedata = get_usage_data(identifiers)
if not self_cits and include_tori:
self_cits = get_selfcitations(identifiers, bibcodes)[1]
self_citations = set((itertools.chain(*[x[0] for x in self_cits])))
if not tori_data and include_tori:
tdata = get_tori_data(identifiers)
tori_data = [p for p in list(itertools.chain(
*[p.rn_citation_data for p in tdata if p.rn_citation_data])) if
p['bibcode'] not in self_citations and 'pubyear' in p]
# Determine the year range
Nentries = datetime.now().year - 1996 + 1
years = [int(b[:4]) for b in bibcodes]
yrange = list(range(min(years), datetime.now().year + 1))
d0 = date(datetime.now().year, 1, 1)
d1 = date(datetime.now().year, datetime.now().month, datetime.now().day)
d2 = date(datetime.now().year, 12, 31)
delta = (d1 - d0).days + 1
ndays = (d2 - d0).days + 1
try:
r10_corr = float(ndays)/float(delta)
except:
r10_corr = 1.0
for year in yrange:
biblist = [b for b in bibcodes if int(b[:4]) <= year]
citations = sorted([len([int(c[:4]) for c in p.citations if int(
c[:4]) <= year]) for p in data if
p.bibcode in biblist], reverse=True)
if year < 1996:
r10[year] = 0.0
else:
idx = year - 1996
r10[year] = sum([float(p.reads[idx]) / float(p.author_num) for
p in usagedata if p.bibcode in biblist and int(
p.bibcode[:4]) > year - 10 and p.reads and
len(p.reads) == Nentries])
try:
h[year] = max([i for i, n in enumerate(citations, 1) if i <= n])
g[year] = max(
[i for i, n in enumerate(np.cumsum(citations, axis=0), 1) if
i**2 <= n])
except:
h[year] = 0
g[year] = 0
i10[year] = len([c for c in citations if c >= 10])
i100[year] = len([c for c in citations if c >= 100])
if include_tori:
tori[year] = np.sum(np.array([r['auth_norm'] * r['ref_norm'] for
r in tori_data if
r['pubyear'] <= year and
r['cityear'] <= year]))
# When all papers are from next year, the following would fail,
# and therefore we just skip it
try:
r10[datetime.now().year] = r10[datetime.now().year] * r10_corr
except:
pass
series['i10'] = i10
series['i100'] = i100
series['h'] = h
series['g'] = g
series['read10'] = r10
if include_tori:
series['tori'] = tori
return series
def single_citation_report(bibc):
histograms = {}
current_year = datetime.now().year
Nentries = current_year - 1996 + 1
zeros = [[0] * Nentries]
data = get_citations_single(bibc)
try:
cityears = [int(b[:4]) for b in data[0].citations]
except:
cityears = []
try:
refcityears = [int(b[:4]) for b in data[0].refereed_citations]
except:
refcityears = []
try:
reads = [int(r) for r in data[0].reads]
except:
reads = zeros
try:
downloads = [int(d) for d in data[0].downloads]
except:
downloads = zeros
nullhist = [(y, 0) for y in range(min(cityears+refcityears), current_year + 1)]
cithist = cy.frequencies(cityears)
refcithist = cy.frequencies(refcityears)
histograms['citations'] = merge_dictionaries(dict(nullhist), cithist)
histograms['ref_citations'] = merge_dictionaries(dict(nullhist), refcithist)
# Have the histograms start at the publication year
histograms['reads'] = dict([(1996 + i, v) for i, v in enumerate(reads) if 1996+i >= int(bibc[:4])])
histograms['downloads'] = dict([(1996 + i, v) for i, v in enumerate(downloads) if 1996+i >= int(bibc[:4])])
return histograms
|
|
'''
Created on Mar 12, 2011
Based on XParser from May 19, 2005
XParser
@author: shkwok
'''
#from xparser import XNode
from . import XNode
from io import StringIO
from urllib2 import urlopen
''' XParser class
First, instantiate an object:
xparser = XParser ()
Then get input data from either a file openInputFile(fname)
where fname can be a URL or a local file name; or
from openFromString (str), where str is from some other source.
xparser.openInputFile (fname) or
xparser.openFromString (str)
Next call
root = xparser.parse ()
root is a XNode, the root of the XML tree.
'''
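# A minimal usage sketch (hypothetical input, not part of the original module):
#
#     xparser = XParser ()
#     root = xparser.parseFromString ('<root a="1"><child>text</child></root>')
#     # or: root = xparser.parseFromFile ('example.xml')
#     print root.name
#
# root is an XNode whose attributes and children are filled in by parse().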
class XParser:
'''
Constructor with optional node class
node should be a subclass of XNode.
    SAX-style parsing can be achieved by using a customized node.
    The node may or may not keep the content, in order to save memory.
'''
def __init__ (self, node=XNode.XNode):
self.nodeClass = node
def tryOpen (self, fname):
'''
If fname starts with http:// or ftp:// then
use urlopen, otherwise open as plain file.
'''
        if fname.startswith (('http://', 'ftp://')):
            return urlopen (fname)
        else:
            return open (fname, 'r')
def openInputFile (self, fname):
self.blen = 0
self.lineNr = 0
self.column = 0
try:
self.fh = self.tryOpen (fname)
return self.fh
except:
            raise IOError ('Failed to open %s' % fname)
def openFromString (self, inputStr):
self.blen = 0
self.lineNr = 0
self.column = 0
self.fh = StringIO (inputStr)
def readChar (self):
'''
Reads one char from self.fh
Fills the input buffer if necessary.
Sets column and lineNr for reporting.
'''
if self.column == self.blen:
self.buffer = self.fh.readline (65535)
self.column = 0
self.blen = len (self.buffer)
self.lineNr += 1
if self.blen == 0:
return False
c = self.buffer[self.column]
self.column += 1
return c
def getLineNrStr (self):
'''
        Reports the lineNr and column of the error, along with the current buffer.
'''
return ('[%d,%d] %s\n' % (self.lineNr, self.column, self.buffer))
def xgetToken (self):
'''
        Helper function for debugging: wraps getToken so tokens can be traced.
        '''
        t = self.getToken ()
#print 'xget (%s)' % t
return t
def getBodyToken (self):
'''
        Reads body text up to the start of the next tag, which begins with '<'
'''
state = 0
sb = ''
while self.currChar != False:
if state == 0:
if self.currChar == '<':
return ''.join (sb)
elif self.currChar in (' ', '\n', '\t', '\r'):
sb += ' '
else:
sb += self.currChar
x = self.readChar ()
if x == False:
if len (sb) > 0: return sb
else: return False
self.currChar = x
# while
return False
def getToken (self):
'''
Reads a token
Called while reading inside a tag.
'''
state = 0
waitFor = ' '
c1 = 0
sb = ''
while self.currChar != False:
#print 'state', state, self.currChar
if state == 0:
if self.currChar in (' ', '\n', '\r', '\t'):
pass
elif self.currChar == '<':
state = 1
sb = sb + self.currChar
elif self.currChar in ('=', '>', '[', ']'):
c1 = self.currChar
self.currChar = self.readChar ()
return c1
elif self.currChar in ('/', '?'):
state = 3
sb = sb + self.currChar
elif self.currChar in ('\'', '"'):
waitFor = self.currChar
state = 4
sb = ''
elif self.currChar == '-':
sb = sb + self.currChar
state = 5
else:
sb = sb + self.currChar
state = 2
            elif state == 1: # got '<', can be <, </, <?, <!
if self.currChar in ('?', '!', '/'):
sb += self.currChar
self.currChar = self.readChar ()
return sb
elif state == 2: # a string
if self.currChar in (' ', '\n', '\r', '\t'):
self.currChar = self.readChar ()
return sb
elif self.currChar in ('/', '?'):
return sb
elif self.currChar in ('<', '>', '=', '[', '-'):
return sb
else:
sb += self.currChar
elif state == 3: # got '/' or '?'
if self.currChar == '>':
sb += self.currChar
self.currChar = self.readChar ()
return sb
elif self.currChar == '<':
return sb
else:
# '/' or '?' not followed by '>'
sb += self.currChar
state = 2 # is a string
            elif state == 4: # inside a quoted value (opened with ' or ")
if self.currChar == waitFor:
self.currChar = self.readChar ()
return sb
else:
sb += self.currChar
#break
elif state == 5: # got '-'
if self.currChar == '-':
sb += self.currChar
self.currChar = self.readChar ()
return sb
elif self.currChar in ('?', '<', '>', '!'):
return '-'
else:
state = 2
sb += self.currChar
x = self.readChar ()
if x == False:
if len (sb) > 0: return sb
else: return False
self.currChar = x
# while
return False
# getToken
def getComment (self):
'''
Reads a comment <!-- ... -->
'''
state = 99
sb = ''
while self.currChar != False:
if state == 0:
if self.currChar == '-':
state = 1
else:
sb += self.currChar
elif state == 1: # --
if self.currChar == '-':
state = 2
else:
                    sb += '-' + self.currChar
state = 0
elif state == 2:
if self.currChar == '>':
                    # got '-->'
self.currChar = self.readChar ()
return sb
else:
                    sb += '--' + self.currChar
state = 0
elif state == 99:
if self.currChar in (' ', '\n', '\r', '\t'):
state = 0
sb += self.currChar
self.currChar = self.readChar ()
return False
# getComment
def getCDATA (self):
'''
Reads CDATA
'''
state = 0
sb = ''
while self.currChar != False:
if state == 0:
if self.currChar == ']':
state = 1
else:
sb += self.currChar
elif state == 1:
if self.currChar == ']':
state = 2
else:
sb += ']' + self.currChar
state = 0
elif state == 2:
if self.currChar == '>':
self.currChar = self.readChar ()
return sb
else:
state = 0
sb += ']]' + self.currChar
else:
sb += self.currChar
x = self.readChar ()
if x == False:
return sb
self.currChar = x
return False
# getCDATA
'''
The grammar is:
Root ::= Tag
Tag ::= TagStart Body TagEnd
TagStart ::= '<' 'name' AttList '>'
TagEnd ::= '</' 'name' '>'
Attribute ::= 'name' '=' 'value'
AttList ::= Attribute | Attribute AttList | e
Body ::= StringList | TagList
StringList ::= 'string' | 'string' StringList | e
    TagList ::= Tag | Tag TagList | e
e ::= ''
'''
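    # For example, '<a x="1"><b>hi</b></a>' is a Tag: its TagStart carries the
    # attribute x="1", its Body is a TagList containing the Tag <b>hi</b>, and
    # that inner Tag's Body is the StringList 'hi'.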
def parseXTag (self, token):
'''
Reads tag starting with '<?'
These are tags at the beginning of a xml document.
'''
str = ''
if token != '<?' and token != '<!':
            print 'Error token, <? or <! expected\n'
return False
str += token
str += self.currChar
c = self.readChar ()
str += c
while self.currChar != False and c != '>':
c = self.readChar ()
str += c
self.currChar = self.readChar ()
return str
# parseXTag
def parseTagStart (self, token):
'''
Reads start of tags
Checks if CDATA or comment
then reads attributes
checks if ends with '/>'
'''
value = ''
self.tagClosed = 0
        if token not in ('<', '<!', '<?'):
print 'Error token %s, < expected\n%s' % \
(token, self.getLineNrStr ())
self.tagClosed = 1
return False
firstToken = token
token = self.getToken ()
if token == '[':
token = self.getToken ()
if token == 'CDATA':
token = self.getToken ()
if token == '[':
cdData = self.nodeClass ('[CDATA[')
token = self.getCDATA ()
cdData.addNode (token)
self.tagClosed = 1
return cdData
if token == '--' and firstToken == '<!':
comments = self.nodeClass ('COMMENT')
token = self.getComment ()
comments.addNode (token)
self.tagClosed = 1
return comments
xn = self.nodeClass (token)
''' Reads tag attributes '''
attname = self.getToken ()
while attname != '>' and attname != '/>':
token = self.getToken ()
if token == '=':
value = self.getToken ()
xn.addAttribute (attname, value)
#print 'Added att ', attname, value, ' to ', xn.name
attname = self.getToken ()
else:
xn.addAttribute (attname, '')
attname = token
if attname == '/>':
self.tagClosed = 1
return xn
# parseTagStart
def parseTagEnd (self, xn, token):
'''
Reads end of tag
Checks that it has a matching opening tag
'''
if token != '</':
print 'Error end tag token, \'</\' expected ' + \
self.getLineNrStr ()
name = self.getToken ()
if name != xn.name:
print 'End of tag ' + \
xn.name + ' expected ' + \
self.getLineNrStr () + ' (name)'
token = self.getToken ()
if token != '>':
print 'Error > expected ' + self.getLineNrStr ()
# parseTagEnd
def parseBody (self, xn):
'''
Reads the content of a tag until end of tag
'''
token = self.getBodyToken()
while token != False:
if token == '</':
self.parseTagEnd (xn, token)
return
''' Two cases:
if token starts with < then it can be end of tag
or it can be a string.
'''
t1 = token.strip ()
if len (t1) > 0:
if t1.startswith ('<'):
xn1 = self.parseIt (t1)
if xn1.name == '[CDATA[':
xn.addNode ('<![CDATA[' + xn1.content[0] + ']]>')
elif xn1.name == 'COMMENT':
xn.addNode ('<!--' + xn1.content[0] + '-->')
else:
xn.addNode (xn1)
else:
xn.addNode (t1)
token = self.getToken ()
return
# parseBody
def parseIt (self, token):
'''
Convenient method for recursion.
'''
xn = self.parseTagStart (token)
if not self.tagClosed:
self.parseBody (xn)
return xn
# parseIt
def parse (self):
'''
Main entry point for parsing.
'''
metaTags = []
self.currChar = ' '
token = self.getToken ()
while token == '<?' or token == '<!':
metaTags.append (self.parseXTag (token))
token = self.getToken ()
# while
xn = self.parseTagStart (token)
if xn == False:
return False
xn.addMetaTags (metaTags)
self.parseBody (xn)
self.fh.close ()
return xn
# parse
def parseFromFile (self, fname):
self.openInputFile(fname)
return self.parse()
def parseFromString (self, str):
self.openFromString(str)
return self.parse()
# XParser
|
|
#
# File: tests.py
# Purpose: test the python extensions for the video sequence reader/writer
# Author: Vlad Morariu
#
# Copyright (c) 2009-2013 Vlad Morariu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cv2
import hashlib
import time
import numpy as np
import cPickle as pickle
import os
import subprocess
import shutil
import unittest
from sequence_reader import SequenceReader
from sequence_writer import SequenceWriter
DATA_DIR = '../data' # this is where videos are downloaded
TMP_DIR = '../data/tmp' # this is where temporary videos are written
def main():
unittest.main()
def get_local_filename(filename, local_dir):
"""Download file if it has not already been downloaded. Return filename."""
local_filename = local_dir + '/' + os.path.basename(filename)
if not os.path.exists(local_filename):
import urllib
print('Downloading %s to %s...' % (filename, local_filename))
if not os.path.exists(local_dir):
os.makedirs(local_dir)
urllib.urlretrieve(filename, local_filename)
return local_filename
def convert_formats(r, w):
"""Copy all frames from the reader to the writer."""
for f in range(r.first, r.last + 1):
w.write(r.read(f), f)
def check_ffmpeg_determinism(fn, tmpdir, frames, iterations, subsample=100):
"""Sometimes ffmpeg 'eats' the first frame, or replaces
the second frame with a duplicate of the first. This is
an upstream problem..."""
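    # known_mismatch() below recognizes exactly these two known failure modes
    # (a duplicated first frame or a dropped first frame), so they are reported
    # instead of raising an error.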
hashes = None
for i in range(iterations):
print(' iter %i...' % i)
hashes_ = get_video_hashes_cmd(fn, tmpdir, frames, subsample)
        if (hashes is not None) and (hashes_ != hashes):
show_mismatch(hashes, hashes_)
mismatch = known_mismatch(hashes, hashes_)
            if mismatch is None:
raise ValueError('Unknown mismatch...')
print('Known mismatch (upstream problem): %s' % mismatch)
hashes = hashes_
def check_formats(inputs, nsamples=20, maxlength=500, seqonly=False):
"""Check that various formats of the same video yield exactly the same
frames."""
hashes = []
times = []
orders = []
for fmt, filename, first, last, is_color in inputs:
print(fmt, filename, first, last, is_color)
r = SequenceReader(filename, first, last, is_color)
#display(r)
# try to access a bad frame index
try:
r.read(-2)
raise Exception('bad index not caught')
except IndexError:
pass
        # come up with the frame read orders
if not orders:
length = r.last - r.first + 1
length = min(maxlength, length)
orders.append(('sequential', np.arange(0, length)))
if not seqonly:
orders.append(('random', np.random.randint(length, size=nsamples)))
orders.append(('random_fwd', np.sort(np.random.randint(length, size=nsamples))))
orders.append(('random_bwd', -np.sort(-np.random.randint(length, size=nsamples))))
orders.append(('reverse_skip', np.arange(length - 1, -1, -max(1, length / nsamples))))
# read the frames in the specified order and compute a hash of the frames
hashes.append([])
times.append([])
for desc, order in orders:
print('fmt: %s, order: %s' % (fmt, desc))
start = time.time()
hashes[-1].append((desc, get_video_hashes(r, r.first + order)))
stop = time.time()
times[-1].append((desc, stop - start))
print('')
match_str = {True: 'match', False: 'mismatch'}
for (input, h, t) in zip(inputs, hashes, times):
print(input[0])
for i, (d, hi) in enumerate(h):
print('%12s: %s, %.3gs' % (d, match_str[hi == hashes[0][i][1]], t[i][1]))
if hi != hashes[0][i][1]:
show_mismatch(hashes[0][i][1], hi)
mismatch = known_mismatch(hashes[0][i][1], hi)
                if mismatch is None:
raise ValueError('Unknown mismatch...')
print('Known mismatch (upstream problem): %s' % mismatch)
if h != hashes[0]:
with open('mismatch.pkl', 'wb') as fp:
pickle.dump((inputs, orders, hashes, times), fp)
print('')
def get_video_hash(reader, frames, subsample=100):
"""Returns an accumulated hash of the requested frames."""
sha = hashlib.sha256()
for f in frames:
im = reader.read(f)
sha.update(im.ravel()[::subsample].tostring())
return sha.hexdigest()
def get_video_hashes(reader, frames, subsample=100):
"""Returns a list of hashes, one for each of the requested frames."""
hashes = []
for f in frames:
im = reader.read(f)
sha = hashlib.sha256()
sha.update(im.ravel()[::subsample].tostring())
hashes.append(sha.hexdigest())
return hashes
def get_video_hashes_cmd(fn_in, dir_out, frames, subsample=100):
"""Uses ffmpeg on the command-line to create a sequence of pngs and
compute the frame hash values. NOTE: dir_out is deleted."""
if not os.path.exists(dir_out):
os.makedirs(dir_out)
pat_out = dir_out + '/frames%06d.png'
cmd = ['ffmpeg', '-i', fn_in, '-vsync', '0', '-vframes', str(max(frames)), pat_out]
#cmd = ['ffmpeg', '-i', fn_in, '-vframes', str(max(frames)), pat_out]
devnull = open(os.devnull, 'w')
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
hashes = []
for f in frames:
im = cv2.imread(pat_out % f)
sha = hashlib.sha256()
sha.update(im.ravel()[::subsample].tostring())
hashes.append(sha.hexdigest())
try:
shutil.rmtree(dir_out)
except OSError:
pass
return hashes
def show_mismatch(hashes1, hashes2):
h2i = {}
for i, h in enumerate(hashes1):
h2i.setdefault(h, []).append(i)
for i, (h1, h2) in enumerate(zip(hashes1, hashes2)):
if h1 != h2:
print('mismatch: frame %i of video 2 matches frames %s of video 1' % (
i, h2i.setdefault(h2, [])))
def known_mismatch(hashes1, hashes2):
"""Returns a string if this is a known mismatch."""
def frame_0_dup_(h1, h2): # asymmetric version
return ((h1[0] == h2[0]) and
(h1[2:] == h2[2:]) and
(h1[1] != h2[1] and h2[1] == h1[0]))
def frame_0_dup(h1, h2):
return frame_0_dup_(h1, h2) or frame_0_dup_(h2, h1)
def frame_0_missing(h1, h2):
        return (h1[1:] == h2[:-1]) or (h2[1:] == h1[:-1])
for func in [frame_0_dup, frame_0_missing]:
if func(hashes1, hashes2):
return func.__name__
return None
def display(reader):
for f in range(reader.first, reader.last + 1):
im = reader.read(f)
cv2.imshow('display', im)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
class TestSequences(unittest.TestCase):
def test_frame_accuracy(self):
"""Test frame accuracy of sequence reader/writer for multiple formats.
TODO: Split up into setUp, tearDown, and parameterized tests.
"""
inputs = [
('mov', 'http://s3.amazonaws.com/mindseye-y1-development/COLLIDE2_A1_C1_Act1_PARK_MC_AFTN_b43925a1-07b6-11e0-98f2-e80688cb869a.mov', -1, -1, 1),]
formats = [
('tar', '.tar::frames_%06i.png'),
('tgz', '.tar.gz::frames_%06i.png'),
('png', '/frames_%06i.png')]
max_frames_ffmpeg = 10 # max frames for checking ffmpeg determinism
check_determinism = False
iterations = 20 # for checking determinism
for fmt, filename, first, last, is_color in inputs:
filename = get_local_filename(filename, DATA_DIR)
r = SequenceReader(filename, first, last, is_color)
# convert the input video to all formats of interest
outputs = []
for fmt_out, suffix in formats:
fn_out = (TMP_DIR + '/' +
os.path.splitext(os.path.split(filename)[1])[0] + suffix)
if not os.path.isdir(os.path.dirname(fn_out)):
os.makedirs(os.path.dirname(fn_out))
print('Converting to \'%s\'...' % fn_out)
convert_formats(r, SequenceWriter(fn_out, 0, 30, r.shape, 1))
outputs.append((fmt_out, fn_out, r.first, r.last, is_color))
# check determinism by repeated reads (ffmpeg cmdline and python readers)
print('Checking for deterministic ffmpeg reads...')
check_ffmpeg_determinism(
filename, TMP_DIR + '/ffmpeg',
range(1, r.last - r.first + 2)[:max_frames_ffmpeg], iterations)
if check_determinism:
print('Checking for deterministic reads...')
check_formats([(fmt, filename, first, last, is_color),]*iterations, seqonly=True)
for output in outputs:
check_formats([output,]*iterations, seqonly=True)
# read all videos and compare hashes of all frames for various read orders
check_formats([(fmt, filename, first, last, is_color),] + outputs)
# delete output videos
shutil.rmtree(TMP_DIR)
if __name__ == '__main__':
main()
|
|
"""
gdalinfo tests/gis_tests/data/rasters/raster.tif:
Driver: GTiff/GeoTIFF
Files: tests/gis_tests/data/rasters/raster.tif
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010002,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
"""
import os
import struct
import tempfile
import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils._os import upath
from ..data.rasters.textrasters import JSON_RASTER
if HAS_GDAL:
from django.contrib.gis.gdal import GDALRaster, GDAL_VERSION
from django.contrib.gis.gdal.raster.band import GDALBand
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALRasterTests(unittest.TestCase):
"""
Test a GDALRaster instance created from a file (GeoTiff).
"""
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
'../data/rasters/raster.tif')
self.rs = GDALRaster(self.rs_path)
def test_rs_name_repr(self):
self.assertEqual(self.rs_path, self.rs.name)
six.assertRegex(self, repr(self.rs), "<Raster object at 0x\w+>")
def test_rs_driver(self):
self.assertEqual(self.rs.driver.name, 'GTiff')
def test_rs_size(self):
self.assertEqual(self.rs.width, 163)
self.assertEqual(self.rs.height, 174)
def test_rs_srs(self):
self.assertEqual(self.rs.srs.srid, 3086)
self.assertEqual(self.rs.srs.units, (1.0, 'metre'))
def test_geotransform_and_friends(self):
# Assert correct values for file based raster
self.assertEqual(self.rs.geotransform,
[511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0])
self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
self.assertEqual(self.rs.origin.x, 511700.4680706557)
self.assertEqual(self.rs.origin.y, 435103.3771231986)
self.assertEqual(self.rs.scale, [100.0, -100.0])
self.assertEqual(self.rs.scale.x, 100.0)
self.assertEqual(self.rs.scale.y, -100.0)
self.assertEqual(self.rs.skew, [0, 0])
self.assertEqual(self.rs.skew.x, 0)
self.assertEqual(self.rs.skew.y, 0)
        # Create an in-memory raster and change its geotransform values
rsmem = GDALRaster(JSON_RASTER)
rsmem.geotransform = range(6)
self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
self.assertEqual(rsmem.origin, [0, 3])
self.assertEqual(rsmem.origin.x, 0)
self.assertEqual(rsmem.origin.y, 3)
self.assertEqual(rsmem.scale, [1, 5])
self.assertEqual(rsmem.scale.x, 1)
self.assertEqual(rsmem.scale.y, 5)
self.assertEqual(rsmem.skew, [2, 4])
self.assertEqual(rsmem.skew.x, 2)
self.assertEqual(rsmem.skew.y, 4)
self.assertEqual(rsmem.width, 5)
self.assertEqual(rsmem.height, 5)
def test_rs_extent(self):
self.assertEqual(self.rs.extent,
(511700.4680706557, 417703.3771231986,
528000.4680706557, 435103.3771231986))
def test_rs_bands(self):
self.assertEqual(len(self.rs.bands), 1)
self.assertIsInstance(self.rs.bands[0], GDALBand)
def test_file_based_raster_creation(self):
# Prepare tempfile
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
# Create file-based raster from scratch
GDALRaster({
'datatype': self.rs.bands[0].datatype(),
'driver': 'tif',
'name': rstfile.name,
'width': 163,
'height': 174,
'nr_of_bands': 1,
'srid': self.rs.srs.wkt,
'origin': (self.rs.origin.x, self.rs.origin.y),
'scale': (self.rs.scale.x, self.rs.scale.y),
'skew': (self.rs.skew.x, self.rs.skew.y),
'bands': [{
'data': self.rs.bands[0].data(),
'nodata_value': self.rs.bands[0].nodata_value,
}],
})
# Reload newly created raster from file
restored_raster = GDALRaster(rstfile.name)
self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
if numpy:
numpy.testing.assert_equal(
restored_raster.bands[0].data(),
self.rs.bands[0].data()
)
else:
self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())
def test_raster_warp(self):
# Create in memory raster
source = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'sourceraster',
'width': 4,
'height': 4,
'nr_of_bands': 1,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
'data': range(16),
'nodata_value': 255,
}],
})
# Test altering the scale, width, and height of a raster
data = {
'scale': [200, -200],
'width': 2,
'height': 2,
}
target = source.warp(data)
self.assertEqual(target.width, data['width'])
self.assertEqual(target.height, data['height'])
self.assertEqual(target.scale, data['scale'])
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertEqual(target.name, 'sourceraster_copy.MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(result, [5, 7, 13, 15])
# Test altering the name and datatype (to float)
data = {
'name': '/path/to/targetraster.tif',
'datatype': 6,
}
target = source.warp(data)
self.assertEqual(target.bands[0].datatype(), 6)
self.assertEqual(target.name, '/path/to/targetraster.tif')
self.assertEqual(target.driver.name, 'MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(
result,
[0.0, 1.0, 2.0, 3.0,
4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0,
12.0, 13.0, 14.0, 15.0]
)
def test_raster_transform(self):
if GDAL_VERSION < (1, 8, 1):
self.skipTest("GDAL >= 1.8.1 is required for this test")
# Prepare tempfile and nodata value
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
ndv = 99
        # Create a file-based raster
source = GDALRaster({
'datatype': 1,
'driver': 'tif',
'name': rstfile.name,
'width': 5,
'height': 5,
'nr_of_bands': 1,
'srid': 4326,
'origin': (-5, 5),
'scale': (2, -2),
'skew': (0, 0),
'bands': [{
'data': range(25),
'nodata_value': ndv,
}],
})
        # Transform raster into srid 3086.
target = source.transform(3086)
# Reload data from disk
target = GDALRaster(target.name)
self.assertEqual(target.srs.srid, 3086)
self.assertEqual(target.width, 7)
self.assertEqual(target.height, 7)
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertEqual(target.origin, [9124842.791079799, 1589911.6476407414])
self.assertEqual(target.scale, [223824.82664250192, -223824.82664250192])
self.assertEqual(target.skew, [0, 0])
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
# The reprojection of a raster that spans over a large area
# skews the data matrix and might introduce nodata values.
self.assertEqual(
result,
[
ndv, ndv, ndv, ndv, 4, ndv, ndv,
ndv, ndv, 2, 3, 9, ndv, ndv,
ndv, 1, 2, 8, 13, 19, ndv,
0, 6, 6, 12, 18, 18, 24,
ndv, 10, 11, 16, 22, 23, ndv,
ndv, ndv, 15, 21, 22, ndv, ndv,
ndv, ndv, 20, ndv, ndv, ndv, ndv,
]
)
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALBandTests(unittest.TestCase):
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
'../data/rasters/raster.tif')
rs = GDALRaster(self.rs_path)
self.band = rs.bands[0]
def test_band_data(self):
self.assertEqual(self.band.width, 163)
self.assertEqual(self.band.height, 174)
self.assertEqual(self.band.description, '')
self.assertEqual(self.band.datatype(), 1)
self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
self.assertEqual(self.band.min, 0)
self.assertEqual(self.band.max, 255)
self.assertEqual(self.band.nodata_value, 15)
def test_read_mode_error(self):
# Open raster in read mode
rs = GDALRaster(self.rs_path, write=False)
band = rs.bands[0]
        # Setting attributes on a raster opened in read mode raises an exception in the _flush method
self.assertRaises(GDALException, setattr, band, 'nodata_value', 10)
def test_band_data_setters(self):
# Create in-memory raster and get band
rsmem = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'mem_rst',
'width': 10,
'height': 10,
'nr_of_bands': 1,
'srid': 4326,
})
bandmem = rsmem.bands[0]
# Set nodata value
bandmem.nodata_value = 99
self.assertEqual(bandmem.nodata_value, 99)
# Set data for entire dataset
bandmem.data(range(100))
if numpy:
numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
else:
self.assertEqual(bandmem.data(), list(range(100)))
# Prepare data for setting values in subsequent tests
block = list(range(100, 104))
packed_block = struct.pack('<' + 'B B B B', *block)
# Set data from list
bandmem.data(block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from packed block
bandmem.data(packed_block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytes
bandmem.data(bytes(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytearray
bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from memoryview
bandmem.data(six.memoryview(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from numpy array
if numpy:
bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
numpy.testing.assert_equal(
bandmem.data(offset=(1, 1), size=(2, 2)),
numpy.array(block).reshape(2, 2)
)
# Test json input data
rsmemjson = GDALRaster(JSON_RASTER)
bandmemjson = rsmemjson.bands[0]
if numpy:
numpy.testing.assert_equal(
bandmemjson.data(),
numpy.array(range(25)).reshape(5, 5)
)
else:
self.assertEqual(bandmemjson.data(), list(range(25)))
|
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
# pylint: disable=R0201
"""This module contains the CallbackContext class."""
from queue import Queue
from typing import (
TYPE_CHECKING,
Dict,
List,
Match,
NoReturn,
Optional,
Tuple,
Union,
Generic,
Type,
TypeVar,
)
from telegram import Update, CallbackQuery
from telegram.ext import ExtBot
from telegram.ext.utils.types import UD, CD, BD
if TYPE_CHECKING:
from telegram import Bot
from telegram.ext import Dispatcher, Job, JobQueue
CC = TypeVar('CC', bound='CallbackContext')
class CallbackContext(Generic[UD, CD, BD]):
"""
This is a context object passed to the callback called by :class:`telegram.ext.Handler`
or by the :class:`telegram.ext.Dispatcher` in an error handler added by
:attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a
:class:`telegram.ext.Job`.
Note:
:class:`telegram.ext.Dispatcher` will create a single context for an entire update. This
means that if you got 2 handlers in different groups and they both get called, they will
get passed the same `CallbackContext` object (of course with proper attributes like
`.matches` differing). This allows you to add custom attributes in a lower handler group
callback, and then subsequently access those attributes in a higher handler group callback.
Note that the attributes on `CallbackContext` might change in the future, so make sure to
use a fairly unique name for the attributes.
Warning:
Do not combine custom attributes and ``@run_async``/
        :meth:`telegram.ext.Dispatcher.run_async`. Due to how ``run_async`` works, it will
almost certainly execute the callbacks for an update out of order, and the attributes
that you think you added will not be present.
Args:
dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher associated with this context.
Attributes:
matches (List[:obj:`re match object`]): Optional. If the associated update originated from
a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of
match objects for every pattern where ``re.search(pattern, string)`` returned a match.
Note that filters short circuit, so combined regex filters will not always
be evaluated.
args (List[:obj:`str`]): Optional. Arguments passed to a command if the associated update
is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler`
or :class:`telegram.ext.StringCommandHandler`. It contains a list of the words in the
text after the command, using any whitespace string as a delimiter.
error (:obj:`Exception`): Optional. The error that was raised. Only present when passed
            to an error handler registered with :attr:`telegram.ext.Dispatcher.add_error_handler`.
async_args (List[:obj:`object`]): Optional. Positional arguments of the function that
raised the error. Only present when the raising function was run asynchronously using
:meth:`telegram.ext.Dispatcher.run_async`.
async_kwargs (Dict[:obj:`str`, :obj:`object`]): Optional. Keyword arguments of the function
that raised the error. Only present when the raising function was run asynchronously
using :meth:`telegram.ext.Dispatcher.run_async`.
job (:class:`telegram.ext.Job`): Optional. The job which originated this callback.
Only present when passed to the callback of :class:`telegram.ext.Job`.
"""
__slots__ = (
'_dispatcher',
'_chat_id_and_data',
'_user_id_and_data',
'args',
'matches',
'error',
'job',
'async_args',
'async_kwargs',
'__dict__',
)
def __init__(self, dispatcher: 'Dispatcher'):
"""
Args:
dispatcher (:class:`telegram.ext.Dispatcher`):
"""
if not dispatcher.use_context:
raise ValueError(
                'CallbackContext should not be used with a non context aware dispatcher!'
)
self._dispatcher = dispatcher
self._chat_id_and_data: Optional[Tuple[int, CD]] = None
self._user_id_and_data: Optional[Tuple[int, UD]] = None
self.args: Optional[List[str]] = None
self.matches: Optional[List[Match]] = None
self.error: Optional[Exception] = None
self.job: Optional['Job'] = None
self.async_args: Optional[Union[List, Tuple]] = None
self.async_kwargs: Optional[Dict[str, object]] = None
@property
def dispatcher(self) -> 'Dispatcher':
""":class:`telegram.ext.Dispatcher`: The dispatcher associated with this context."""
return self._dispatcher
@property
def bot_data(self) -> BD:
""":obj:`dict`: Optional. A dict that can be used to keep any data in. For each
update it will be the same ``dict``.
"""
return self.dispatcher.bot_data
@bot_data.setter
def bot_data(self, value: object) -> NoReturn:
raise AttributeError(
"You can not assign a new value to bot_data, see https://git.io/Jt6ic"
)
@property
def chat_data(self) -> Optional[CD]:
""":obj:`dict`: Optional. A dict that can be used to keep any data in. For each
update from the same chat id it will be the same ``dict``.
Warning:
When a group chat migrates to a supergroup, its chat id will change and the
``chat_data`` needs to be transferred. For details see our `wiki page
<https://github.com/python-telegram-bot/python-telegram-bot/wiki/
Storing-bot,-user-and-chat-related-data#chat-migration>`_.
"""
if self._chat_id_and_data:
return self._chat_id_and_data[1]
return None
@chat_data.setter
def chat_data(self, value: object) -> NoReturn:
raise AttributeError(
"You can not assign a new value to chat_data, see https://git.io/Jt6ic"
)
@property
def user_data(self) -> Optional[UD]:
""":obj:`dict`: Optional. A dict that can be used to keep any data in. For each
update from the same user it will be the same ``dict``.
"""
if self._user_id_and_data:
return self._user_id_and_data[1]
return None
@user_data.setter
def user_data(self, value: object) -> NoReturn:
raise AttributeError(
"You can not assign a new value to user_data, see https://git.io/Jt6ic"
)
def refresh_data(self) -> None:
"""If :attr:`dispatcher` uses persistence, calls
:meth:`telegram.ext.BasePersistence.refresh_bot_data` on :attr:`bot_data`,
:meth:`telegram.ext.BasePersistence.refresh_chat_data` on :attr:`chat_data` and
:meth:`telegram.ext.BasePersistence.refresh_user_data` on :attr:`user_data`, if
appropriate.
.. versionadded:: 13.6
"""
if self.dispatcher.persistence:
if self.dispatcher.persistence.store_bot_data:
self.dispatcher.persistence.refresh_bot_data(self.bot_data)
if self.dispatcher.persistence.store_chat_data and self._chat_id_and_data is not None:
self.dispatcher.persistence.refresh_chat_data(*self._chat_id_and_data)
if self.dispatcher.persistence.store_user_data and self._user_id_and_data is not None:
self.dispatcher.persistence.refresh_user_data(*self._user_id_and_data)
def drop_callback_data(self, callback_query: CallbackQuery) -> None:
"""
Deletes the cached data for the specified callback query.
.. versionadded:: 13.6
Note:
Will *not* raise exceptions in case the data is not found in the cache.
*Will* raise :class:`KeyError` in case the callback query can not be found in the
cache.
Args:
callback_query (:class:`telegram.CallbackQuery`): The callback query.
Raises:
KeyError | RuntimeError: :class:`KeyError`, if the callback query can not be found in
the cache and :class:`RuntimeError`, if the bot doesn't allow for arbitrary
callback data.
"""
if isinstance(self.bot, ExtBot):
if not self.bot.arbitrary_callback_data:
raise RuntimeError(
'This telegram.ext.ExtBot instance does not use arbitrary callback data.'
)
self.bot.callback_data_cache.drop_data(callback_query)
else:
raise RuntimeError('telegram.Bot does not allow for arbitrary callback data.')
@classmethod
def from_error(
cls: Type[CC],
update: object,
error: Exception,
dispatcher: 'Dispatcher',
async_args: Union[List, Tuple] = None,
async_kwargs: Dict[str, object] = None,
) -> CC:
"""
Constructs an instance of :class:`telegram.ext.CallbackContext` to be passed to the error
handlers.
.. seealso:: :meth:`telegram.ext.Dispatcher.add_error_handler`
Args:
update (:obj:`object` | :class:`telegram.Update`): The update associated with the
error. May be :obj:`None`, e.g. for errors in job callbacks.
error (:obj:`Exception`): The error.
dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher associated with this
context.
async_args (List[:obj:`object`]): Optional. Positional arguments of the function that
raised the error. Pass only when the raising function was run asynchronously using
:meth:`telegram.ext.Dispatcher.run_async`.
async_kwargs (Dict[:obj:`str`, :obj:`object`]): Optional. Keyword arguments of the
function that raised the error. Pass only when the raising function was run
asynchronously using :meth:`telegram.ext.Dispatcher.run_async`.
Returns:
:class:`telegram.ext.CallbackContext`
"""
self = cls.from_update(update, dispatcher)
self.error = error
self.async_args = async_args
self.async_kwargs = async_kwargs
return self
@classmethod
def from_update(cls: Type[CC], update: object, dispatcher: 'Dispatcher') -> CC:
"""
Constructs an instance of :class:`telegram.ext.CallbackContext` to be passed to the
handlers.
.. seealso:: :meth:`telegram.ext.Dispatcher.add_handler`
Args:
update (:obj:`object` | :class:`telegram.Update`): The update.
dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher associated with this
context.
Returns:
:class:`telegram.ext.CallbackContext`
"""
self = cls(dispatcher)
if update is not None and isinstance(update, Update):
chat = update.effective_chat
user = update.effective_user
if chat:
self._chat_id_and_data = (
chat.id,
dispatcher.chat_data[chat.id], # pylint: disable=W0212
)
if user:
self._user_id_and_data = (
user.id,
dispatcher.user_data[user.id], # pylint: disable=W0212
)
return self
@classmethod
def from_job(cls: Type[CC], job: 'Job', dispatcher: 'Dispatcher') -> CC:
"""
Constructs an instance of :class:`telegram.ext.CallbackContext` to be passed to a
job callback.
.. seealso:: :meth:`telegram.ext.JobQueue`
Args:
job (:class:`telegram.ext.Job`): The job.
dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher associated with this
context.
Returns:
:class:`telegram.ext.CallbackContext`
"""
self = cls(dispatcher)
self.job = job
return self
def update(self, data: Dict[str, object]) -> None:
"""Updates ``self.__slots__`` with the passed data.
Args:
data (Dict[:obj:`str`, :obj:`object`]): The data.
"""
for key, value in data.items():
setattr(self, key, value)
@property
def bot(self) -> 'Bot':
""":class:`telegram.Bot`: The bot associated with this context."""
return self._dispatcher.bot
@property
def job_queue(self) -> Optional['JobQueue']:
"""
:class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the
:class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`
associated with this context.
"""
return self._dispatcher.job_queue
@property
def update_queue(self) -> Queue:
"""
:class:`queue.Queue`: The ``Queue`` instance used by the
:class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`
associated with this context.
"""
return self._dispatcher.update_queue
@property
def match(self) -> Optional[Match[str]]:
"""
`Regex match type`: The first match from :attr:`matches`.
Useful if you are only filtering using a single regex filter.
Returns `None` if :attr:`matches` is empty.
"""
try:
return self.matches[0] # type: ignore[index] # pylint: disable=unsubscriptable-object
except (IndexError, TypeError):
return None
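# Usage sketch (illustrative only, not part of the library API): with a
# context-aware Dispatcher, handler callbacks receive the Update and a
# CallbackContext. The handler below is a hypothetical example and assumes the
# update comes from a chat, so ``chat_data`` is a dict rather than ``None``.
def _example_visit_counter(update: Update, context: CallbackContext) -> None:
    # ``chat_data`` is the same dict for every update from the same chat, so
    # mutating it (never reassigning it) works as lightweight per-chat storage.
    visits = context.chat_data.get('visits', 0) + 1
    context.chat_data['visits'] = visits
    if update.effective_message:
        update.effective_message.reply_text(f'visit #{visits}')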
|
|
from typing import Iterable, Optional, Sequence, Union, cast
import pytz
from dateutil.parser import parse as dateparser
from django.core import validators
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from zerver.lib.actions import (
check_schedule_message,
check_send_message,
compute_irc_user_fullname,
compute_jabber_user_fullname,
create_mirror_user_if_needed,
extract_private_recipients,
extract_stream_indicator,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.message import render_markdown
from zerver.lib.request import REQ, get_request_notes, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.timestamp import convert_to_UTC
from zerver.lib.topic import REQ_topic
from zerver.lib.zcommand import process_zcommands
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import (
Client,
Message,
Realm,
RealmDomain,
UserProfile,
email_to_domain,
get_user_including_cross_realm,
)
class InvalidMirrorInput(Exception):
pass
def create_mirrored_message_users(
request: HttpRequest, user_profile: UserProfile, recipients: Iterable[str]
) -> UserProfile:
if "sender" not in request.POST:
raise InvalidMirrorInput("No sender")
sender_email = request.POST["sender"].strip().lower()
referenced_users = {sender_email}
if request.POST["type"] == "private":
for email in recipients:
referenced_users.add(email.lower())
client = get_request_notes(request).client
assert client is not None
if client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
raise InvalidMirrorInput("Unrecognized mirroring client")
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
raise InvalidMirrorInput("At least one user cannot be mirrored")
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_including_cross_realm(sender_email, user_profile.realm)
return sender
def same_realm_zephyr_user(user_profile: UserProfile, email: str) -> bool:
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
    # the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email)
    # Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return (
user_profile.realm.is_zephyr_mirror_realm
and RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
)
def same_realm_irc_user(user_profile: UserProfile, email: str) -> bool:
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be [email protected]
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email).replace("irc.", "")
    # Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_jabber_user(user_profile: UserProfile, email: str) -> bool:
try:
validators.validate_email(email)
except ValidationError:
return False
# If your Jabber users have a different email domain than the
# Zulip users, this is where you would do any translation.
domain = email_to_domain(email)
    # Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
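# Illustrative sketch (this helper is hypothetical and not used by the view
# code): all three same_realm_*_user checks reduce an email address to a
# domain and compare it against the realm's RealmDomain rows; only the IRC
# variant rewrites the domain first.
def _mirror_domain_for(client_name: str, email: str) -> str:
    domain = email_to_domain(email)
    if client_name == "irc_mirror":
        # e.g. the domain "irc.example.com" is checked as "example.com"
        domain = domain.replace("irc.", "")
    return domain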
def handle_deferred_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str],
message_content: str,
delivery_type: str,
defer_until: str,
tz_guess: Optional[str],
forwarder_user_profile: UserProfile,
realm: Optional[Realm],
) -> HttpResponse:
deliver_at = None
local_tz = "UTC"
if tz_guess:
local_tz = tz_guess
elif sender.timezone:
local_tz = sender.timezone
try:
deliver_at = dateparser(defer_until)
except ValueError:
raise JsonableError(_("Invalid time format"))
deliver_at_usertz = deliver_at
if deliver_at_usertz.tzinfo is None:
user_tz = pytz.timezone(local_tz)
deliver_at_usertz = user_tz.normalize(user_tz.localize(deliver_at))
deliver_at = convert_to_UTC(deliver_at_usertz)
if deliver_at <= timezone_now():
raise JsonableError(_("Time must be in the future."))
check_schedule_message(
sender,
client,
message_type_name,
message_to,
topic_name,
message_content,
delivery_type,
deliver_at,
realm=realm,
forwarder_user_profile=forwarder_user_profile,
)
return json_success({"deliver_at": str(deliver_at_usertz)})
@has_request_variables
def send_message_backend(
request: HttpRequest,
user_profile: UserProfile,
message_type_name: str = REQ("type"),
req_to: Optional[str] = REQ("to", default=None),
forged_str: Optional[str] = REQ("forged", default=None, documentation_pending=True),
topic_name: Optional[str] = REQ_topic(),
message_content: str = REQ("content"),
widget_content: Optional[str] = REQ(default=None, documentation_pending=True),
realm_str: Optional[str] = REQ("realm_str", default=None, documentation_pending=True),
local_id: Optional[str] = REQ(default=None),
queue_id: Optional[str] = REQ(default=None),
delivery_type: str = REQ("delivery_type", default="send_now", documentation_pending=True),
defer_until: Optional[str] = REQ("deliver_at", default=None, documentation_pending=True),
tz_guess: Optional[str] = REQ("tz_guess", default=None, documentation_pending=True),
) -> HttpResponse:
# If req_to is None, then we default to an
# empty list of recipients.
message_to: Union[Sequence[int], Sequence[str]] = []
if req_to is not None:
if message_type_name == "stream":
stream_indicator = extract_stream_indicator(req_to)
# For legacy reasons check_send_message expects
# a list of streams, instead of a single stream.
#
# Also, mypy can't detect that a single-item
# list populated from a Union[int, str] is actually
# a Union[Sequence[int], Sequence[str]].
if isinstance(stream_indicator, int):
message_to = [stream_indicator]
else:
message_to = [stream_indicator]
else:
message_to = extract_private_recipients(req_to)
# Temporary hack: We're transitioning `forged` from accepting
# `yes` to accepting `true` like all of our normal booleans.
forged = forged_str is not None and forged_str in ["yes", "true"]
client = get_request_notes(request).client
assert client is not None
can_forge_sender = user_profile.can_forge_sender
if forged and not can_forge_sender:
raise JsonableError(_("User not authorized for this query"))
realm = None
if realm_str and realm_str != user_profile.realm.string_id:
# The realm_str parameter does nothing, because it has to match
# the user's realm - but we keep it around for backward compatibility.
raise JsonableError(_("User not authorized for this query"))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream.
#
# The most important security checks are in
# `create_mirrored_message_users` below, which checks the
# same-realm constraint.
if "sender" not in request.POST:
raise JsonableError(_("Missing sender"))
if message_type_name != "private" and not can_forge_sender:
raise JsonableError(_("User not authorized for this query"))
# For now, mirroring only works with recipient emails, not for
# recipient user IDs.
if not all(isinstance(to_item, str) for to_item in message_to):
raise JsonableError(_("Mirroring not allowed with recipient user IDs"))
# We need this manual cast so that mypy doesn't complain about
# create_mirrored_message_users not being able to accept a Sequence[int]
# type parameter.
message_to = cast(Sequence[str], message_to)
try:
mirror_sender = create_mirrored_message_users(request, user_profile, message_to)
except InvalidMirrorInput:
raise JsonableError(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
raise JsonableError(_("Zephyr mirroring is not allowed in this organization"))
sender = mirror_sender
else:
if "sender" in request.POST:
raise JsonableError(_("Invalid mirrored message"))
sender = user_profile
if (delivery_type == "send_later" or delivery_type == "remind") and defer_until is None:
raise JsonableError(_("Missing deliver_at in a request for delayed message delivery"))
if (delivery_type == "send_later" or delivery_type == "remind") and defer_until is not None:
return handle_deferred_message(
sender,
client,
message_type_name,
message_to,
topic_name,
message_content,
delivery_type,
defer_until,
tz_guess,
forwarder_user_profile=user_profile,
realm=realm,
)
ret = check_send_message(
sender,
client,
message_type_name,
message_to,
topic_name,
message_content,
forged=forged,
forged_timestamp=request.POST.get("time"),
forwarder_user_profile=user_profile,
realm=realm,
local_id=local_id,
sender_queue_id=queue_id,
widget_content=widget_content,
)
return json_success({"id": ret})
@has_request_variables
def zcommand_backend(
request: HttpRequest, user_profile: UserProfile, command: str = REQ("command")
) -> HttpResponse:
return json_success(process_zcommands(command, user_profile))
@has_request_variables
def render_message_backend(
request: HttpRequest, user_profile: UserProfile, content: str = REQ()
) -> HttpResponse:
message = Message()
message.sender = user_profile
message.content = content
client = get_request_notes(request).client
assert client is not None
message.sending_client = client
rendering_result = render_markdown(message, content, realm=user_profile.realm)
return json_success({"rendered": rendering_result.rendered_content})
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import webob.exc
from nova.api.openstack.compute import hosts as os_hosts_v21
from nova.api.openstack.compute.legacy_v2.contrib import hosts as os_hosts_v2
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as context_maker
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_hosts
def stub_service_get_all(context, disabled=None):
return fake_hosts.SERVICES_LIST
def stub_service_get_by_host_and_binary(context, host_name, binary):
for service in stub_service_get_all(context):
if service['host'] == host_name and service['binary'] == binary:
return service
def stub_set_host_enabled(context, host_name, enabled):
"""Simulates three possible behaviours for VM drivers or compute
drivers when enabling or disabling a host.
'enabled' means new instances can go to this host
'disabled' means they can't
"""
results = {True: "enabled", False: "disabled"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
elif host_name == "host_c2":
# Simulate a failure
return results[not enabled]
else:
# Do the right thing
return results[enabled]
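# Contract sketch for the stub above (illustrative; this helper is not
# referenced by the tests and ``_ctx`` is a placeholder argument):
def _demo_stub_set_host_enabled(_ctx):
    assert stub_set_host_enabled(_ctx, "host_c1", True) == "enabled"
    # "host_c2" simulates a failure by reporting the opposite state
    assert stub_set_host_enabled(_ctx, "host_c2", True) == "disabled"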
def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
results = {True: "on_maintenance", False: "off_maintenance"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
elif host_name == "host_c2":
# Simulate a failure
return results[not mode]
else:
# Do the right thing
return results[mode]
def stub_host_power_action(context, host_name, action):
if host_name == "notimplemented":
raise NotImplementedError()
elif host_name == "dummydest":
# The host does not exist
raise exception.ComputeHostNotFound(host=host_name)
elif host_name == "service_not_available":
# The service is not available
raise exception.ComputeServiceUnavailable(host=host_name)
return action
def _create_instance(**kwargs):
"""Create a test instance."""
ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
"""Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = kwargs.get('user_id', 'admin')
inst['project_id'] = kwargs.get('project_id', 'fake')
inst['instance_type_id'] = '1'
if 'host' in kwargs:
inst['host'] = kwargs.get('host')
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['root_gb'] = kwargs.get('root_gb', 30)
inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
inst['task_state'] = kwargs.get('task_state', None)
inst['availability_zone'] = kwargs.get('availability_zone', None)
inst['ami_launch_index'] = 0
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
return inst
class FakeRequestWithNovaZone(object):
environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
class HostTestCaseV21(test.TestCase):
"""Test Case for hosts."""
validation_ex = exception.ValidationError
Controller = os_hosts_v21.HostController
policy_ex = exception.PolicyNotAuthorized
def _setup_stubs(self):
# Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
# Only hosts in our fake DB exist
self.stubs.Set(db, 'service_get_by_host_and_binary',
stub_service_get_by_host_and_binary)
        # 'host_c1' always succeeds, and 'host_c2' always fails
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
        # 'host_c1' always succeeds, and 'host_c2' always fails
self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def setUp(self):
super(HostTestCaseV21, self).setUp()
self.controller = self.Controller()
self.hosts_api = self.controller.api
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self._setup_stubs()
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body=body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the compute hosts are returned."""
result = self.controller.index(self.req)
self.assertIn('hosts', result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
self._test_host_update('host_c2', 'status', 'disable', 'enabled')
def test_enable_host(self):
self._test_host_update('host_c1', 'status', 'enable', 'enabled')
self._test_host_update('host_c2', 'status', 'enable', 'disabled')
def test_enable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'enable', 'on_maintenance')
def test_disable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'disable', 'off_maintenance')
def _test_host_update_notimpl(self, key, val):
def stub_service_get_all_notimpl(self, req):
return [{'host': 'notimplemented', 'topic': None,
'availability_zone': None}]
self.stubs.Set(db, 'service_get_all',
stub_service_get_all_notimpl)
body = {key: val}
self.assertRaises(webob.exc.HTTPNotImplemented,
self.controller.update,
self.req, 'notimplemented', body=body)
def test_disable_host_notimpl(self):
self._test_host_update_notimpl('status', 'disable')
def test_enable_maintenance_notimpl(self):
self._test_host_update_notimpl('maintenance_mode', 'enable')
def test_host_startup(self):
result = self.controller.startup(self.req, "host_c1")
self.assertEqual(result["power_action"], "startup")
def test_host_shutdown(self):
result = self.controller.shutdown(self.req, "host_c1")
self.assertEqual(result["power_action"], "shutdown")
def test_host_reboot(self):
result = self.controller.reboot(self.req, "host_c1")
self.assertEqual(result["power_action"], "reboot")
def _test_host_power_action_notimpl(self, method):
self.assertRaises(webob.exc.HTTPNotImplemented,
method, self.req, "notimplemented")
def test_host_startup_notimpl(self):
self._test_host_power_action_notimpl(self.controller.startup)
def test_host_shutdown_notimpl(self):
self._test_host_power_action_notimpl(self.controller.shutdown)
def test_host_reboot_notimpl(self):
self._test_host_power_action_notimpl(self.controller.reboot)
def test_host_status_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.update(self.req, dest, body={'status': 'enable'})
def test_host_maintenance_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.update(self.req, dest,
body={'maintenance_mode': 'enable'})
def test_host_power_action_bad_host(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.reboot(self.req, dest)
def test_host_status_bad_status(self):
        # The service on the given host is not available.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.update(self.req, dest, body={'status': 'enable'})
def test_host_maintenance_bad_status(self):
        # The service on the given host is not available.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.update(self.req, dest,
body={'maintenance_mode': 'enable'})
def test_host_power_action_bad_status(self):
        # The service on the given host is not available.
self.req.environ["nova.context"].is_admin = True
dest = 'service_not_available'
with testtools.ExpectedException(webob.exc.HTTPBadRequest,
".*%s.*" % dest):
self.controller.reboot(self.req, dest)
def test_bad_status_value(self):
bad_body = {"status": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
bad_body2 = {"status": "disablabc"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body2)
def test_bad_update_key(self):
bad_body = {"crazy": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
def test_bad_update_key_and_correct_update_key(self):
bad_body = {"status": "disable", "crazy": "bad"}
self.assertRaises(self.validation_ex, self.controller.update,
self.req, "host_c1", body=bad_body)
def test_good_update_keys(self):
body = {"status": "disable", "maintenance_mode": "enable"}
result = self.controller.update(self.req, 'host_c1', body=body)
self.assertEqual(result["host"], "host_c1")
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
def test_show_host_not_exist(self):
# A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
with testtools.ExpectedException(webob.exc.HTTPNotFound,
".*%s.*" % dest):
self.controller.show(self.req, dest)
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
'host': s_ref['host'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': ''}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
"""No instances are running on the given host."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 3)
for resource in result['host']:
self.assertIn(resource['resource']['project'], proj)
self.assertEqual(len(resource['resource']), 5)
self.assertEqual(set(column), set(resource['resource'].keys()))
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
"""show() works correctly as expected."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 5)
for resource in result['host']:
self.assertIn(resource['resource']['project'], proj)
self.assertEqual(len(resource['resource']), 5)
self.assertEqual(set(column), set(resource['resource'].keys()))
db.service_destroy(ctxt, s_ref['id'])
db.instance_destroy(ctxt, i_ref1['uuid'])
db.instance_destroy(ctxt, i_ref2['uuid'])
def test_list_hosts_with_zone(self):
result = self.controller.index(FakeRequestWithNovaZone())
self.assertIn('hosts', result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
class HostTestCaseV20(HostTestCaseV21):
validation_ex = webob.exc.HTTPBadRequest
policy_ex = webob.exc.HTTPForbidden
Controller = os_hosts_v2.HostController
def test_list_hosts_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.index, fakes.HTTPRequest.blank(''))
def test_host_maintenance_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.update, fakes.HTTPRequest.blank(''),
'host_c1', {'maintenance_mode': 'enable'})
def test_startup_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.startup, fakes.HTTPRequest.blank(''),
'host_c1')
def test_reboot_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.reboot, fakes.HTTPRequest.blank(''),
'host_c1')
def test_shutdown_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.shutdown,
fakes.HTTPRequest.blank(''),
'host_c1')
def test_show_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.show,
fakes.HTTPRequest.blank(''),
1)
class HostsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(HostsPolicyEnforcementV21, self).setUp()
self.controller = os_hosts_v21.HostController()
self.req = fakes.HTTPRequest.blank('')
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-hosts"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:os-hosts"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, 1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for common methods used by iLO modules."""
import tempfile
import mock
from oslo_config import cfg
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
if six.PY3:
import io
file = io.BytesIO
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloDeployPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_deploy._get_boot_iso_object_name(self.node)
boot_iso_expected = "boot-%s" % self.node.uuid
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_http_url(self, service_mock):
url = 'http://abc.org/image/qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
service_mock.assert_called_once_with(mock.ANY, url)
self.assertEqual(url, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_url(self, mock_validate):
url = 'http://aaa/bbb'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
self.assertEqual(url, boot_iso_actual)
mock_validate.assert_called_once_with(mock.ANY, url)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_unsupported_url(self, validate_href_mock):
validate_href_mock.side_effect = iter(
[exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')])
url = 'file://img.qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
ilo_deploy._get_boot_iso, task, 'root-uuid')
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_props_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': 'boot-iso-uuid',
'kernel_id': None,
'ramdisk_id': None}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_iso_expected = 'boot-iso-uuid'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_uefi_no_glance_image(self,
deploy_info_mock,
image_props_mock,
boot_mode_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': None,
'ramdisk_id': None}
properties = {'capabilities': 'boot_mode:uefi'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
boot_iso_result = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
self.assertFalse(boot_mode_mock.called)
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create(self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
swift_api_mock,
create_boot_iso_mock, tempfile_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.pxe.pxe_append_params = 'kernel-params'
swift_obj_mock = swift_api_mock.return_value
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': 'kernel_uuid',
'ramdisk_id': 'ramdisk_uuid'}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
'kernel_uuid',
'ramdisk_uuid',
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
swift_obj_mock.create_object.assert_called_once_with('ilo-cont',
'abcdef',
'tmpfile')
boot_iso_expected = 'swift:abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(ilo_common, 'copy_image_to_web_server', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create_use_webserver_true_ramdisk_webserver(
self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
create_boot_iso_mock, tempfile_mock,
copy_file_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_url = "http://10.10.1.30/httpboot"
CONF.deploy.http_root = "/httpboot"
CONF.pxe.pxe_append_params = 'kernel-params'
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
ramdisk_href = "http://10.10.1.30/httpboot/ramdisk"
kernel_href = "http://10.10.1.30/httpboot/kernel"
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': kernel_href,
'ramdisk_id': ramdisk_href}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
copy_file_mock.return_value = "http://10.10.1.30/httpboot/abcdef"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
kernel_href,
ramdisk_href,
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
boot_iso_expected = 'http://10.10.1.30/httpboot/abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
copy_file_mock.assert_called_once_with(fileobj_mock.name,
'abcdef')
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
def test__clean_up_boot_iso_for_instance(self, swift_mock,
boot_object_name_mock):
swift_obj_mock = swift_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
@mock.patch.object(utils, 'unlink_without_raise', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_on_webserver(self, unlink_mock):
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_root = "/webserver"
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'http://x.y.z.a/webserver/boot-object'
self.node.instance_info = i_info
self.node.save()
boot_iso_path = "/webserver/boot-object"
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
unlink_mock.assert_called_once_with(boot_iso_path)
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_no_boot_iso(
self, boot_object_name_mock):
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
self.assertFalse(boot_object_name_mock.called)
@mock.patch.object(deploy_utils, 'check_for_missing_params', spec_set=True,
autospec=True)
def test__parse_driver_info(self, check_params_mock):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
driver_info_expected = {'ilo_deploy_iso': 'deploy-iso-uuid'}
driver_info_actual = ilo_deploy._parse_driver_info(self.node)
error_msg = ("Error validating iLO virtual media deploy. Some"
" parameters were missing in node's driver_info")
check_params_mock.assert_called_once_with(driver_info_expected,
error_msg)
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(ilo_deploy, '_parse_driver_info', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'parse_instance_info', spec_set=True,
autospec=True)
def test__parse_deploy_info(self, instance_info_mock, driver_info_mock):
instance_info_mock.return_value = {'a': 'b'}
driver_info_mock.return_value = {'c': 'd'}
expected_info = {'a': 'b', 'c': 'd'}
actual_info = ilo_deploy._parse_deploy_info(self.node)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test__reboot_into(self, setup_vmedia_mock, set_boot_device_mock,
node_power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
opts = {'a': 'b'}
ilo_deploy._reboot_into(task, 'iso', opts)
setup_vmedia_mock.assert_called_once_with(task, 'iso', opts)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
node_power_action_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_reboot_into', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', spec_set=True,
autospec=True)
def test__prepare_agent_vmedia_boot(self, build_options_mock,
reboot_into_mock, eject_mock):
deploy_opts = {'a': 'b'}
build_options_mock.return_value = deploy_opts
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
ilo_deploy._prepare_agent_vmedia_boot(task)
eject_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
reboot_into_mock.assert_called_once_with(task,
'deploy-iso-uuid',
deploy_opts)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
def test__update_secure_boot_mode_passed_true(self,
func_set_secure_boot_mode,
func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_is_secure_boot_req.return_value = True
ilo_deploy._update_secure_boot_mode(task, True)
func_set_secure_boot_mode.assert_called_once_with(task, True)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
def test__update_secure_boot_mode_passed_False(self,
func_set_secure_boot_mode,
func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_is_secure_boot_req.return_value = False
ilo_deploy._update_secure_boot_mode(task, False)
self.assertFalse(func_set_secure_boot_mode.called)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_false(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(func_set_secure_boot_mode.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_true(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
func_set_secure_boot_mode.assert_called_once_with(task, False)
self.assertTrue(returned_state)
@mock.patch.object(ilo_deploy.LOG, 'debug', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_exception(self,
func_get_secure_boot_mode,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
func_get_secure_boot_mode.side_effect = Exception
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertTrue(mock_log.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test__prepare_node_for_deploy(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test__prepare_node_for_deploy_sec_boot_on(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
ret_boot_mode = task.node.instance_info['deploy_boot_mode']
self.assertEqual('uefi', ret_boot_mode)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test__prepare_node_for_deploy_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
task.node.instance_info = instance_info
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
self.assertIsNone(deploy_boot_mode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test__prepare_node_for_deploy_sec_boot_on_inst_info(
self, func_node_power_action, func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
task.node.instance_info = instance_info
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
self.assertIsNone(deploy_boot_mode)
@mock.patch.object(ilo_deploy.LOG, 'warning', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test__recreate_and_populate_boot_iso_root_uuid_set(self,
get_boot_iso_mock,
log_mock):
driver_internal_info = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info['root_uuid_or_disk_id'] = 'root-uuid'
task.node.driver_internal_info = driver_internal_info
r_uuid = task.node.driver_internal_info['root_uuid_or_disk_id']
get_boot_iso_mock.return_value = 'boot-uuid'
ilo_deploy._recreate_and_populate_ilo_boot_iso(task)
self.assertEqual(task.node.instance_info['ilo_boot_iso'],
'boot-uuid')
get_boot_iso_mock.assert_called_once_with(task, r_uuid)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_deploy.LOG, 'warning', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test__recreate_and_populate_boot_iso_root_not_set(self,
get_boot_iso_mock,
log_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['ilo_boot_iso'] = 'boot-uuid-old-iso'
ilo_deploy._recreate_and_populate_ilo_boot_iso(task)
self.assertEqual(task.node.instance_info['ilo_boot_iso'],
'boot-uuid-old-iso')
self.assertFalse(get_boot_iso_mock.called)
self.assertTrue(log_mock.called)
@mock.patch.object(ilo_deploy.LOG, 'warning',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso',
spec_set=True, autospec=True)
def test__recreate_and_populate_get_boot_iso_fails(self,
get_boot_iso_mock,
log_mock):
driver_internal_info = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info['boot_iso_created_in_web_server'] = True
driver_internal_info['root_uuid_or_disk_id'] = 'uuid'
task.node.instance_info['ilo_boot_iso'] = 'boot-uuid-old-iso'
task.node.driver_internal_info = driver_internal_info
task.node.save()
r_uuid = task.node.driver_internal_info.get('root_uuid_or_disk_id')
get_boot_iso_mock.side_effect = Exception
ilo_deploy._recreate_and_populate_ilo_boot_iso(task)
self.assertEqual(task.node.instance_info['ilo_boot_iso'],
'boot-uuid-old-iso')
get_boot_iso_mock.assert_called_once_with(task, r_uuid)
self.assertTrue(log_mock.called)
@mock.patch.object(ilo_deploy.LOG, 'warning',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso',
spec_set=True, autospec=True)
def test__recreate_and_populate_get_boot_iso_none(self,
boot_iso_mock,
log_mock):
driver_internal_info = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info['boot_iso_created_in_web_server'] = True
driver_internal_info['root_uuid_or_disk_id'] = 'uuid'
task.node.driver_internal_info = driver_internal_info
r_uuid = task.node.driver_internal_info.get('root_uuid_or_disk_id')
task.node.instance_info['ilo_boot_iso'] = 'boot-uuid-old-iso'
task.node.save()
boot_iso_mock.return_value = None
ilo_deploy._recreate_and_populate_ilo_boot_iso(task)
boot_iso_mock.assert_called_once_with(task, r_uuid)
self.assertEqual(task.node.instance_info['ilo_boot_iso'],
'boot-uuid-old-iso')
self.assertTrue(log_mock.called)
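# The cases below exercise the iSCSI-over-virtual-media deploy interface of the
# "iscsi_ilo" driver: validation, deploy/tear_down, clean_up, prepare and
# take_over behaviour.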
class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaIscsiDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True)
def _test_validate(self, validate_mock,
deploy_info_mock,
validate_prop_mock,
validate_capability_mock,
props_expected):
d_info = {'image_source': 'uuid'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, props_expected)
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True)
def test_validate_invalid_boot_option(self,
validate_mock,
deploy_info_mock,
validate_prop_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
properties = {'capabilities': 'boot_mode:uefi,boot_option:foo'}
deploy_info_mock.return_value = d_info
props = ['kernel_id', 'ramdisk_id']
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
exc = self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate,
task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props)
self.assertIn('boot_option', str(exc))
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True)
def test_validate_invalid_boot_mode(self,
validate_mock,
deploy_info_mock,
validate_prop_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
properties = {'capabilities': 'boot_mode:foo,boot_option:local'}
deploy_info_mock.return_value = d_info
props = ['kernel_id', 'ramdisk_id']
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
exc = self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate,
task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props)
self.assertIn('boot_mode', str(exc))
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test_validate_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = True
self._test_validate(props_expected=['kernel_id', 'ramdisk_id'])
def test_validate_whole_disk_image(self):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
self._test_validate(props_expected=[])
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test_validate_non_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = False
self._test_validate(props_expected=['kernel', 'ramdisk'])
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_reboot_into', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'check_image_size', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'cache_instance_image', spec_set=True,
autospec=True)
def _test_deploy(self,
cache_instance_image_mock,
check_image_size_mock,
build_opts_mock,
agent_options_mock,
get_nic_mock,
reboot_into_mock,
eject_mock,
ilo_boot_iso,
image_source
):
instance_info = self.node.instance_info
instance_info['ilo_boot_iso'] = ilo_boot_iso
instance_info['image_source'] = image_source
self.node.instance_info = instance_info
self.node.save()
deploy_opts = {'a': 'b'}
agent_options_mock.return_value = {
'ipa-api-url': 'http://1.2.3.4:6385'}
build_opts_mock.return_value = deploy_opts
get_nic_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
returned_state = task.driver.deploy.deploy(task)
eject_mock.assert_called_once_with(task)
cache_instance_image_mock.assert_called_once_with(task.context,
task.node)
check_image_size_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab',
'ipa-api-url': 'http://1.2.3.4:6385'}
build_opts_mock.assert_called_once_with(task.node)
get_nic_mock.assert_called_once_with(task)
reboot_into_mock.assert_called_once_with(task, 'deploy-iso',
expected_ramdisk_opts)
self.assertEqual(states.DEPLOYWAIT, returned_state)
def test_deploy_glance_image(self):
self._test_deploy(
ilo_boot_iso='swift:abcdef',
image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af')
self.node.refresh()
self.assertNotIn('ilo_boot_iso', self.node.instance_info)
def test_deploy_not_a_glance_image(self):
self._test_deploy(
ilo_boot_iso='http://mybootiso',
image_source='http://myimage')
self.node.refresh()
self.assertEqual('http://mybootiso',
self.node.instance_info['ilo_boot_iso'])
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down(self,
node_power_action_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = True
driver_internal_info['root_uuid_or_disk_id'] = 'uuid'
task.node.driver_internal_info = driver_internal_info
task.node.save()
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)
dinfo = task.node.driver_internal_info
self.assertNotIn('boot_iso_created_in_web_server', dinfo)
self.assertNotIn('root_uuid_or_disk_id', dinfo)
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down_handle_exception(self,
node_power_action_mock,
update_secure_boot_mode_mock,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy, '_clean_up_boot_iso_for_instance',
spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'destroy_images', spec_set=True,
autospec=True)
def test_clean_up(self, destroy_images_mock, clean_up_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.clean_up(task)
destroy_images_mock.assert_called_once_with(task.node.uuid)
clean_up_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_clean_up_boot_iso_for_instance',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'destroy_floppy_image_from_web_server',
spec_set=True, autospec=True)
def test_clean_up_of_webserver_images(self, destroy_images_mock,
clean_up_boot_mock):
CONF.ilo.use_web_server_for_images = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.clean_up(task)
destroy_images_mock.assert_called_once_with(task.node)
clean_up_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
def test_prepare(self, func_prepare_node_for_deploy):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
func_prepare_node_for_deploy.assert_called_once_with(task)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
def test_prepare_active_node(self, func_prepare_node_for_deploy):
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
self.assertFalse(func_prepare_node_for_deploy.called)
@mock.patch.object(ilo_deploy, '_recreate_and_populate_ilo_boot_iso',
spec_set=True, autospec=True)
def test_take_over_recreate_iso_config_and_dif_set(self, mock_recreate):
driver_internal_info = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
CONF.ilo.use_web_server_for_images = True
driver_internal_info['boot_iso_created_in_web_server'] = True
task.node.driver_internal_info = driver_internal_info
task.node.save()
task.driver.deploy.take_over(task)
mock_recreate.assert_called_once_with(task)
@mock.patch.object(ilo_deploy, '_recreate_and_populate_ilo_boot_iso',
spec_set=True, autospec=True)
def test_take_over_recreate_iso_config_set_and_dif_not_set(self,
mock_recreate):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
CONF.ilo.use_web_server_for_images = True
task.node.save()
task.driver.deploy.take_over(task)
self.assertFalse(mock_recreate.called)
@mock.patch.object(ilo_deploy, '_recreate_and_populate_ilo_boot_iso',
spec_set=True, autospec=True)
def test_take_over_recreate_iso_config_not_set(self, mock_recreate):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
CONF.ilo.use_web_server_for_images = False
task.node.save()
task.driver.deploy.take_over(task)
self.assertFalse(mock_recreate.called)
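# Tests for the agent-based virtual-media deploy interface of the "agent_ilo"
# driver: validate/deploy/tear_down plus the cleaning-related hooks.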
class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self,
parse_driver_info_mock,
validate_capability_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
parse_driver_info_mock.assert_called_once_with(task.node)
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_prepare_agent_vmedia_boot', spec_set=True,
autospec=True)
def test_deploy(self, vmedia_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.deploy(task)
vmedia_boot_mock.assert_called_once_with(task)
self.assertEqual(states.DEPLOYWAIT, returned_state)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down(self,
node_power_action_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down_handle_exception(self,
node_power_action_mock,
update_secure_boot_mode_mock,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(agent, 'build_instance_info_for_deploy', spec_set=True,
autospec=True)
def test_prepare(self,
build_instance_info_mock,
func_prepare_node_for_deploy):
deploy_opts = {'a': 'b'}
build_instance_info_mock.return_value = deploy_opts
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
self.assertEqual(deploy_opts, task.node.instance_info)
func_prepare_node_for_deploy.assert_called_once_with(task)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(agent, 'build_instance_info_for_deploy', spec_set=True,
autospec=True)
def test_prepare_active_node(self,
build_instance_info_mock,
func_prepare_node_for_deploy):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.ACTIVE
task.driver.deploy.prepare(task)
self.assertFalse(build_instance_info_mock.called)
self.assertFalse(func_prepare_node_for_deploy.called)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports',
spec_set=True, autospec=True)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.create_cleaning_ports',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_prepare_agent_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_cleaning(self, vmedia_boot_mock, create_port_mock,
delete_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.prepare_cleaning(task)
vmedia_boot_mock.assert_called_once_with(task)
self.assertEqual(states.CLEANWAIT, returned_state)
create_port_mock.assert_called_once_with(mock.ANY, task)
delete_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(task.node.driver_internal_info.get(
'agent_erase_devices_iterations'), 1)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports',
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down_cleaning(self, power_mock, delete_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.driver.deploy.tear_down_cleaning(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
delete_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step', spec_set=True,
autospec=True)
def test_execute_clean_step(self, execute_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.driver.deploy.execute_clean_step(task, 'fake-step')
execute_mock.assert_called_once_with(task, 'fake-step')
@mock.patch.object(deploy_utils, 'agent_get_clean_steps', spec_set=True,
autospec=True)
def test_get_clean_steps_with_conf_option(self, get_clean_step_mock):
self.config(clean_priority_erase_devices=20, group='ilo')
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
get_clean_step_mock.assert_called_once_with(task)
self.assertEqual(step[0].get('priority'),
CONF.ilo.clean_priority_erase_devices)
@mock.patch.object(deploy_utils, 'agent_get_clean_steps', spec_set=True,
autospec=True)
def test_get_clean_steps_erase_devices_disable(self, get_clean_step_mock):
self.config(clean_priority_erase_devices=0, group='ilo')
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
get_clean_step_mock.assert_called_once_with(task)
self.assertEqual(step[0].get('priority'),
CONF.ilo.clean_priority_erase_devices)
@mock.patch.object(deploy_utils, 'agent_get_clean_steps', spec_set=True,
autospec=True)
def test_get_clean_steps_without_conf_option(self, get_clean_step_mock):
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
get_clean_step_mock.assert_called_once_with(task)
self.assertEqual(step[0].get('priority'), 10)
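# Tests for the VendorPassthru interface of the "iscsi_ilo" driver:
# pass_deploy_info, pass_bootloader_install_info, continue_deploy and the
# boot_into_iso vendor methods.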
class VendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(VendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo',
driver_info=INFO_DICT)
@mock.patch.object(iscsi_deploy, 'get_deploy_info', spec_set=True,
autospec=True)
def test_validate_pass_deploy_info(self, get_deploy_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_deploy.VendorPassthru()
vendor.validate(task, method='pass_deploy_info', foo='bar')
get_deploy_info_mock.assert_called_once_with(task.node,
foo='bar')
@mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
spec_set=True, autospec=True)
def test_validate_pass_bootloader_install_info(self,
validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
'status': 'SUCCEEDED', 'error': ''}
task.driver.vendor.validate(
task, method='pass_bootloader_install_info', **kwargs)
validate_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(iscsi_deploy, 'get_deploy_info', spec_set=True,
autospec=True)
def test_validate_heartbeat(self, get_deploy_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_deploy.VendorPassthru()
vendor.validate(task, method='heartbeat', foo='bar')
self.assertFalse(get_deploy_info_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_with_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = 'boot.iso'
task.driver.vendor._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
setup_vmedia_mock.assert_called_once_with(
task, 'boot.iso')
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM, persistent=True)
self.assertEqual('boot.iso',
task.node.instance_info['ilo_boot_iso'])
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_without_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = None
task.driver.vendor._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
autospec=True)
def test_pass_bootloader_install_info(self, finish_deploy_mock,
validate_input_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
finish_deploy_mock.assert_called_once_with(task, '123456')
validate_input_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_pass_deploy_info_good(self, cleanup_vmedia_boot_mock,
continue_deploy_mock, get_boot_iso_mock,
setup_vmedia_mock, set_boot_device_mock,
func_update_boot_mode,
func_update_secure_boot_mode,
notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_boot_iso_mock.return_value = 'boot-iso'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_boot_iso_mock.assert_called_once_with(task, 'root-uuid')
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
self.assertEqual('boot-iso',
task.node.instance_info['ilo_boot_iso'])
info = task.node.driver_internal_info['root_uuid_or_disk_id']
self.assertEqual('root-uuid', info)
notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_pass_deploy_info_bad(self, cleanup_vmedia_boot_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.AVAILABLE
self.node.target_provision_state = states.NOSTATE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
self.assertRaises(exception.InvalidState,
vendor.pass_deploy_info,
task, **kwargs)
self.assertEqual(states.AVAILABLE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(cleanup_vmedia_boot_mock.called)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test_pass_deploy_info_create_boot_iso_fail(
self, get_iso_mock, cleanup_vmedia_boot_mock, continue_deploy_mock,
node_power_mock, update_boot_mode_mock,
update_secure_boot_mode_mock):
kwargs = {'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_iso_mock.side_effect = iter([exception.ImageCreationFailed(
image_type='iso', error="error")])
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_iso_mock.assert_called_once_with(task, 'root-uuid')
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
@mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_pass_deploy_info_boot_option_local(
self, cleanup_vmedia_boot_mock, continue_deploy_mock,
func_update_boot_mode, func_update_secure_boot_mode,
set_boot_device_mock, notify_ramdisk_to_proceed_mock,
finish_deploy_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(finish_deploy_mock.called)
@mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def _test_pass_deploy_info_whole_disk_image(
self, cleanup_vmedia_boot_mock, continue_deploy_mock,
func_update_boot_mode, func_update_secure_boot_mode,
set_boot_device_mock, notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
iscsi_deploy.finish_deploy.assert_called_once_with(task, '123456')
def test_pass_deploy_info_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_pass_deploy_info_whole_disk_image()
def test_pass_deploy_info_whole_disk_image(self):
self._test_pass_deploy_info_whole_disk_image()
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy.VendorPassthru, '_configure_vmedia_boot',
spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_continue_deploy_netboot(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_vmedia_boot_mock,
reboot_and_finish_deploy_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_vmedia_boot_mock.assert_called_once_with(
mock.ANY, task, 'some-root-uuid')
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_continue_deploy_localboot(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
mock.ANY, task, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_continue_deploy_whole_disk_image(
self, cleanup_vmedia_boot_mock, do_agent_iscsi_deploy_mock,
configure_local_boot_mock, reboot_and_finish_deploy_mock,
boot_mode_cap_mock, update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'disk identifier': 'some-disk-id'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
mock.ANY, task, root_uuid=None, efi_system_part_uuid=None)
reboot_and_finish_deploy_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', spec_set=True,
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_continue_deploy_localboot_uefi(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid',
'efi system partition uuid': 'efi-system-part-uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
mock.ANY, task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(ilo_deploy, '_reboot_into', spec_set=True,
autospec=True)
def test_boot_into_iso(self, reboot_into_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.boot_into_iso(task, boot_iso_href='foo')
reboot_into_mock.assert_called_once_with(task, 'foo',
ramdisk_options=None)
@mock.patch.object(ilo_deploy.VendorPassthru, '_validate_boot_into_iso',
spec_set=True, autospec=True)
def test_validate_boot_into_iso(self, validate_boot_into_iso_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_deploy.VendorPassthru()
vendor.validate(task, method='boot_into_iso', foo='bar')
validate_boot_into_iso_mock.assert_called_once_with(
vendor, task, {'foo': 'bar'})
def test__validate_boot_into_iso_invalid_state(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.AVAILABLE
self.assertRaises(
exception.InvalidStateRequested,
task.driver.vendor._validate_boot_into_iso,
task, {})
def test__validate_boot_into_iso_missing_boot_iso_href(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.MANAGEABLE
self.assertRaises(
exception.MissingParameterValue,
task.driver.vendor._validate_boot_into_iso,
task, {})
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_manage(self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.provision_state = states.MANAGEABLE
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo'}, [])
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_maintenance(
self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.maintenance = True
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo'}, [])
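# Tests for the PXE deploy interface of the "pxe_ilo" driver, which wraps
# iscsi_deploy.ISCSIDeploy and adds iLO-specific boot-mode and secure-boot
# handling around it.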
class IloPXEDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'validate', spec_set=True,
autospec=True)
def test_validate(self, pxe_validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
pxe_validate_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
def test_prepare(self,
prepare_node_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.driver.deploy.prepare(task)
prepare_node_mock.assert_called_once_with(task)
pxe_prepare_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
def test_prepare_active_node(self,
prepare_node_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.ACTIVE
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.driver.deploy.prepare(task)
self.assertFalse(prepare_node_mock.called)
pxe_prepare_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy', spec_set=True,
autospec=True)
def test_prepare_uefi_whole_disk_image_fail(self,
prepare_node_for_deploy_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.node.driver_internal_info['is_whole_disk_image'] = True
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.prepare, task)
prepare_node_for_deploy_mock.assert_called_once_with(task)
self.assertFalse(pxe_prepare_mock.called)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'deploy', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
def test_deploy_boot_mode_exists(self, set_persistent_mock,
pxe_deploy_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.deploy(task)
set_persistent_mock.assert_called_with(task, boot_devices.PXE)
pxe_deploy_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down(self, node_power_action_mock,
update_secure_boot_mode_mock, pxe_tear_down_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
pxe_tear_down_mock.return_value = states.DELETED
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
pxe_tear_down_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode',
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_tear_down_handle_exception(self, node_power_action_mock,
update_secure_boot_mode_mock,
exception_mock, pxe_tear_down_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
pxe_tear_down_mock.return_value = states.DELETED
exception_mock.IloOperationNotSupported = Exception
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
pxe_tear_down_mock.assert_called_once_with(mock.ANY, task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
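# Tests for the vendor-passthru interface of the "pxe_ilo" driver: the exposed
# vendor/driver routes and the iLO additions to pass_deploy_info and
# continue_deploy.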
class IloPXEVendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEVendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)
def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(sorted(expected), sorted(list(vendor_routes)))
def test_driver_routes(self):
expected = ['lookup']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual(sorted(expected), sorted(list(driver_routes)))
@mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
def test_vendorpassthru_pass_deploy_info(self, set_boot_device_mock,
func_update_boot_mode,
func_update_secure_boot_mode,
pxe_vendorpassthru_mock):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.driver.vendor.pass_deploy_info(task, **kwargs)
set_boot_device_mock.assert_called_with(task, boot_devices.PXE,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
pxe_vendorpassthru_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
@mock.patch.object(iscsi_deploy.VendorPassthru, 'continue_deploy',
spec_set=True, autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
def test_vendorpassthru_continue_deploy(self,
func_update_boot_mode,
func_update_secure_boot_mode,
pxe_vendorpassthru_mock):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.driver.vendor.continue_deploy(task, **kwargs)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
pxe_vendorpassthru_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
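# Tests for the agent vendor interface of the "agent_ilo" driver, mainly the
# boot-mode/secure-boot updates performed before rebooting to the instance.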
class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
def test_reboot_to_instance(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.assert_called_once_with(
mock.ANY, task.node)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
agent_reboot_to_instance_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
autospec=True)
def test_reboot_to_instance_deploy_fail(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = "Error"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.assert_called_once_with(
mock.ANY, task.node)
self.assertFalse(func_update_boot_mode.called)
self.assertFalse(func_update_secure_boot_mode.called)
agent_reboot_to_instance_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import re
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
Set,
Float,
DateTime,
NoneSet,
Bool,
Integer,
String,
MatchPattern,
Sequence,
Convertible,
MinMax,
)
from openpyxl.descriptors.excel import ExtensionList, CellRange
from openpyxl.descriptors.sequence import ValueSequence
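# The classes below are Serialisable wrappers around the OOXML worksheet
# <autoFilter> and <sortState> elements and their children: filter columns,
# icon/colour/dynamic/custom filters, date grouping and sort conditions.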
class SortCondition(Serialisable):
tagname = "sortCondition"
descending = Bool(allow_none=True)
sortBy = NoneSet(values=(['value', 'cellColor', 'fontColor', 'icon']))
ref = CellRange()
customList = String(allow_none=True)
dxfId = Integer(allow_none=True)
iconSet = NoneSet(values=(['3Arrows', '3ArrowsGray', '3Flags',
'3TrafficLights1', '3TrafficLights2', '3Signs', '3Symbols', '3Symbols2',
'4Arrows', '4ArrowsGray', '4RedToBlack', '4Rating', '4TrafficLights',
'5Arrows', '5ArrowsGray', '5Rating', '5Quarters']))
iconId = Integer(allow_none=True)
def __init__(self,
ref=None,
descending=None,
sortBy=None,
customList=None,
dxfId=None,
iconSet=None,
iconId=None,
):
self.descending = descending
self.sortBy = sortBy
self.ref = ref
self.customList = customList
self.dxfId = dxfId
self.iconSet = iconSet
self.iconId = iconId
class SortState(Serialisable):
tagname = "sortState"
columnSort = Bool(allow_none=True)
caseSensitive = Bool(allow_none=True)
sortMethod = NoneSet(values=(['stroke', 'pinYin']))
ref = CellRange()
sortCondition = Sequence(expected_type=SortCondition, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('sortCondition',)
def __init__(self,
columnSort=None,
caseSensitive=None,
sortMethod=None,
ref=None,
sortCondition=(),
extLst=None,
):
self.columnSort = columnSort
self.caseSensitive = caseSensitive
self.sortMethod = sortMethod
self.ref = ref
self.sortCondition = sortCondition
def __bool__(self):
return self.ref is not None
__nonzero__ = __bool__
class IconFilter(Serialisable):
tagname = "iconFilter"
iconSet = Set(values=(['3Arrows', '3ArrowsGray', '3Flags',
'3TrafficLights1', '3TrafficLights2', '3Signs', '3Symbols', '3Symbols2',
'4Arrows', '4ArrowsGray', '4RedToBlack', '4Rating', '4TrafficLights',
'5Arrows', '5ArrowsGray', '5Rating', '5Quarters']))
iconId = Integer(allow_none=True)
def __init__(self,
iconSet=None,
iconId=None,
):
self.iconSet = iconSet
self.iconId = iconId
class ColorFilter(Serialisable):
tagname = "colorFilter"
dxfId = Integer(allow_none=True)
cellColor = Bool(allow_none=True)
def __init__(self,
dxfId=None,
cellColor=None,
):
self.dxfId = dxfId
self.cellColor = cellColor
class DynamicFilter(Serialisable):
tagname = "dynamicFilter"
type = Set(values=(['null', 'aboveAverage', 'belowAverage', 'tomorrow',
'today', 'yesterday', 'nextWeek', 'thisWeek', 'lastWeek', 'nextMonth',
'thisMonth', 'lastMonth', 'nextQuarter', 'thisQuarter', 'lastQuarter',
'nextYear', 'thisYear', 'lastYear', 'yearToDate', 'Q1', 'Q2', 'Q3', 'Q4',
'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9', 'M10', 'M11',
'M12']))
val = Float(allow_none=True)
valIso = DateTime(allow_none=True)
maxVal = Float(allow_none=True)
maxValIso = DateTime(allow_none=True)
def __init__(self,
type=None,
val=None,
valIso=None,
maxVal=None,
maxValIso=None,
):
self.type = type
self.val = val
self.valIso = valIso
self.maxVal = maxVal
self.maxValIso = maxValIso
class CustomFilter(Serialisable):
tagname = "customFilter"
operator = NoneSet(values=(['equal', 'lessThan', 'lessThanOrEqual',
'notEqual', 'greaterThanOrEqual', 'greaterThan']))
val = String()
def __init__(self,
operator=None,
val=None,
):
self.operator = operator
self.val = val
class CustomFilters(Serialisable):
tagname = "customFilters"
_and = Bool(allow_none=True)
customFilter = Sequence(expected_type=CustomFilter) # min 1, max 2
__elements__ = ('customFilter',)
def __init__(self,
_and=None,
customFilter=(),
):
self._and = _and
self.customFilter = customFilter
class Top10(Serialisable):
tagname = "top10"
top = Bool(allow_none=True)
percent = Bool(allow_none=True)
val = Float()
filterVal = Float(allow_none=True)
def __init__(self,
top=None,
percent=None,
val=None,
filterVal=None,
):
self.top = top
self.percent = percent
self.val = val
self.filterVal = filterVal
class DateGroupItem(Serialisable):
tagname = "dateGroupItem"
year = Integer()
month = MinMax(min=1, max=12, allow_none=True)
day = MinMax(min=1, max=31, allow_none=True)
hour = MinMax(min=0, max=23, allow_none=True)
minute = MinMax(min=0, max=59, allow_none=True)
second = Integer(min=0, max=59, allow_none=True)
dateTimeGrouping = Set(values=(['year', 'month', 'day', 'hour', 'minute',
'second']))
def __init__(self,
year=None,
month=None,
day=None,
hour=None,
minute=None,
second=None,
dateTimeGrouping=None,
):
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.dateTimeGrouping = dateTimeGrouping
class Filters(Serialisable):
tagname = "filters"
blank = Bool(allow_none=True)
calendarType = NoneSet(values=["gregorian","gregorianUs",
"gregorianMeFrench","gregorianArabic", "hijri","hebrew",
"taiwan","japan", "thai","korea",
"saka","gregorianXlitEnglish","gregorianXlitFrench"])
filter = ValueSequence(expected_type=unicode)
dateGroupItem = Sequence(expected_type=DateGroupItem, allow_none=True)
__elements__ = ('filter', 'dateGroupItem')
def __init__(self,
blank=None,
calendarType=None,
filter=(),
dateGroupItem=(),
):
self.blank = blank
self.calendarType = calendarType
self.filter = filter
self.dateGroupItem = dateGroupItem
class FilterColumn(Serialisable):
tagname = "filterColumn"
colId = Integer()
col_id = Alias('colId')
hiddenButton = Bool(allow_none=True)
showButton = Bool(allow_none=True)
# some elements are choice
filters = Typed(expected_type=Filters, allow_none=True)
top10 = Typed(expected_type=Top10, allow_none=True)
customFilters = Typed(expected_type=CustomFilters, allow_none=True)
dynamicFilter = Typed(expected_type=DynamicFilter, allow_none=True)
colorFilter = Typed(expected_type=ColorFilter, allow_none=True)
iconFilter = Typed(expected_type=IconFilter, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('filters', 'top10', 'customFilters', 'dynamicFilter',
'colorFilter', 'iconFilter')
def __init__(self,
colId=None,
hiddenButton=None,
showButton=None,
filters=None,
top10=None,
customFilters=None,
dynamicFilter=None,
colorFilter=None,
iconFilter=None,
extLst=None,
blank=None,
vals=None,
):
self.colId = colId
self.hiddenButton = hiddenButton
self.showButton = showButton
if filters is None:
filters = Filters()
self.filters = filters
self.top10 = top10
self.customFilters = customFilters
self.dynamicFilter = dynamicFilter
self.colorFilter = colorFilter
self.iconFilter = iconFilter
if blank is not None:
self.filters.blank = blank
if vals is not None:
self.filters.filter = vals
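# Convenience note (not part of the OOXML schema): ``blank`` and ``vals`` are
# constructor shortcuts that populate the nested Filters object, so the two
# constructions below should be equivalent.
#
#     FilterColumn(colId=0, vals=["Apple", "Kiwi"], blank=True)
#     FilterColumn(colId=0, filters=Filters(blank=True, filter=["Apple", "Kiwi"]))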
class AutoFilter(Serialisable):
tagname = "autoFilter"
ref = CellRange()
filterColumn = Sequence(expected_type=FilterColumn, allow_none=True)
sortState = Typed(expected_type=SortState, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('filterColumn', 'sortState')
def __init__(self,
ref=None,
filterColumn=(),
sortState=None,
extLst=None,
):
self.ref = ref
self.filterColumn = filterColumn
self.sortState = sortState
def __bool__(self):
return self.ref is not None
__nonzero__ = __bool__
def add_filter_column(self, col_id, vals, blank=False):
"""
Add a row filter for the specified column.
:param col_id: Zero-origin column id. 0 means first column.
:type col_id: int
:param vals: Value list to show.
:type vals: str[]
:param blank: Show rows that have a blank cell if True (default=``False``)
:type blank: bool
"""
self.filterColumn.append(FilterColumn(colId=col_id, vals=vals, blank=blank))
def add_sort_condition(self, ref, descending=False):
"""
        Add a sort condition for the specified range of cells.
:param ref: range of the cells (e.g. 'A2:A150')
:type ref: string
:param descending: Descending sort order (default=``False``)
:type descending: bool
"""
cond = SortCondition(ref, descending)
if self.sortState is None:
self.sortState = SortState(ref=ref)
self.sortState.sortCondition.append(cond)
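# Illustrative usage sketch (not part of the original module). It assumes this
# file is openpyxl's worksheet filter implementation and that the public
# ``openpyxl.Workbook``/``Worksheet.auto_filter`` API is available; the helper
# below only exercises the methods defined above and is never called here.
def _example_autofilter_usage():
    """Hedged example: filter column A to two names and sort by column B."""
    from openpyxl import Workbook

    wb = Workbook()
    ws = wb.active
    ws.append(["Name", "Score"])
    ws.append(["Alice", 10])
    ws.append(["Bob", 7])
    ws.auto_filter.ref = "A1:B3"
    ws.auto_filter.add_filter_column(0, ["Alice", "Bob"], blank=False)
    ws.auto_filter.add_sort_condition("B2:B3", descending=True)
    wb.save("filtered.xlsx")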
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
import numpy as np
import pytest
import pytz
from pandas.compat import PY2, product
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, MultiIndex, Series, Timestamp, date_range,
period_range, to_datetime)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_index_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
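# ``close_open_fixture`` yields the four (inc_start, inc_end) combinations
# consumed by TestDataFrameTimeSeriesMethods.test_between_time below.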
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
assert rs.s[1] == 1
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis0(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
result = df.diff(axis=0)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']),
1: pd.TimedeltaIndex(['NaT', '1 days'])})
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis1(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
if tz is None:
result = df.diff(axis=1)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']),
1: pd.TimedeltaIndex(['0 days',
'0 days'])})
assert_frame_equal(result, expected)
else:
with pytest.raises(NotImplementedError):
result = df.diff(axis=1)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
assert result[0].dtype == np.float64
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs,
(filled / filled.shift(freq='5D') - 1)
.reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
@pytest.mark.parametrize("freq, periods, fill_method, limit",
[('5B', 5, None, None),
('3B', 3, None, None),
('3B', 3, 'bfill', None),
('7B', 7, 'pad', 1),
('7B', 7, 'bfill', 3),
('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
rs_freq = self.tsframe.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = self.tsframe.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=self.tsframe.index,
columns=self.tsframe.columns)
rs_freq = empty_ts.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = empty_ts.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_append_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert (tmp['dates'].values == ex_vals).all()
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(self.tsframe)
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
msg = 'does not match PeriodIndex freq'
with pytest.raises(ValueError, match=msg):
ps.shift(freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH #24128
df = DataFrame([1, 2, 3, 4, 5],
index=date_range('1/1/2000', periods=5, freq='H'))
exp = DataFrame([0, 1, 2, 3, 4],
index=date_range('1/1/2000', periods=5, freq='H'))
result = df.shift(1, fill_value=0)
assert_frame_equal(result, exp)
exp = DataFrame([0, 0, 1, 2, 3],
index=date_range('1/1/2000', periods=5, freq='H'))
result = df.shift(2, fill_value=0)
assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH 9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
assert_series_equal(nulls, Series(range(1, 6), dtype='int64'))
# check all answers are the same
assert_frame_equal(shifted[0], shifted[1])
assert_frame_equal(shifted[0], shifted[2])
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
with pytest.raises(ValueError, match='does not match'):
ps.tshift(freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
msg = "Freq was not given and was not set in the index"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(before=ts.index[-1] - ts.index.freq,
after=ts.index[0] + ts.index.freq)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
assert not (self.tsframe.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self):
# GH 17935
df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']},
index=[5, 3, 2, 9, 0])
msg = 'truncate requires a sorted index'
with pytest.raises(ValueError, match=msg):
df.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.DataFrame({'A': np.random.randn(len(rng)),
'B': np.random.randn(len(rng))},
index=rng)
msg = 'truncate requires a sorted index'
with pytest.raises(ValueError, match=msg):
ts.sort_values('A', ascending=False).truncate(before='2011-11',
after='2011-12')
df = pd.DataFrame({3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5)},
columns=[3, 20, 2, 0])
msg = 'truncate requires a sorted index'
with pytest.raises(ValueError, match=msg):
df.truncate(before=2, after=20, axis=1)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
assert isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize("data,idx,expected_first,expected_last", [
({'A': [1, 2, 3]}, [1, 1, 2], 1, 2),
({'A': [1, 2, 3]}, [1, 2, 2], 1, 2),
({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'),
({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
def test_first_last_valid(self, data, idx,
expected_first, expected_last):
N = len(self.frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
        # GH20499: it preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq='D')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_frame_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_frame_equal(result, expected)
result = ts[:0].first('3M')
assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first('1D')
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq='D')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['2000-01-10':]
assert_frame_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_frame_equal(result, expected)
result = ts[:0].last('3M')
assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last('1D')
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
@pytest.mark.parametrize('hour', ['1:00', '1:00AM', time(1),
time(1, tzinfo=pytz.UTC)])
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range('2018', periods=3, freq='H')
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, 'tzinfo', None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range('2018', periods=3, freq='H', tz='US/Pacific')
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone('US/Eastern')))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time('00:00')
@pytest.mark.parametrize('axis', ['index', 'columns', 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ['index', 0]:
expected = ts.loc[indices, :]
elif axis in ['columns', 1]:
expected = ts.loc[:, indices]
result = ts.at_time('9:30', axis=axis)
assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time='00:00', end_time='12:00')
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range('1/1/2000', periods=100, freq='10min')
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ('08:00:00', '09:00:00')
exp_len = 7
if axis in ['index', 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ['columns', 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range('1/1/2000', periods=100, freq='10min')
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ('08:00:00', '09:00:00')
msg = "Index must be DatetimeIndex"
if axis in ['columns', 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ['index', 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
result = pd.Series(data_ns).to_frame()
result['new'] = data_ns
expected = pd.DataFrame({0: [1, None],
'new': [1, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, 'nat'], dtype='datetime64[s]')
result['new'] = data_s
expected = pd.DataFrame({0: [1, None],
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_frame_to_period(self):
K = 5
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(np.random.randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
msg = "No axis named 2 for object type <class 'type'>"
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
@pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
def test_tz_convert_and_localize(self, fn):
l0 = date_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
assert not df3.index.levels[0].equals(l0)
assert_index_equal(df3.index.levels[0], l0_expected)
assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)('US/Pacific', level=1)
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
# Bad Inputs
# Not DatetimeIndex / PeriodIndex
with pytest.raises(TypeError, match='DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with pytest.raises(TypeError, match='DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with pytest.raises(ValueError, match='not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for :py:class:`magnum.conductor.rpcapi.API`.
"""
import copy
import mock
from magnum.conductor import api as conductor_rpcapi
from magnum import objects
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils as dbutils
class RPCAPITestCase(base.DbTestCase):
def setUp(self):
super(RPCAPITestCase, self).setUp()
self.fake_bay = dbutils.get_test_bay(driver='fake-driver')
self.fake_container = dbutils.get_test_container(driver='fake-driver')
self.fake_pod = dbutils.get_test_pod(driver='fake-driver')
self.fake_rc = dbutils.get_test_rc(driver='fake-driver')
self.fake_service = dbutils.get_test_service(driver='fake-driver')
self.fake_x509keypair = dbutils.get_test_x509keypair(
driver='fake-driver')
self.fake_certificate = objects.Certificate.from_db_bay(self.fake_bay)
self.fake_certificate.csr = 'fake-csr'
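    # Generic harness used by the tests below: patch ``prepare`` and the
    # requested rpc method on the oslo.messaging client, invoke
    # ``rpcapi.<method>(**kwargs)`` and assert the topic, version and
    # message payload that would have gone over RPC.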
def _test_rpcapi(self, method, rpc_method, **kwargs):
rpcapi_cls = kwargs.pop('rpcapi_cls', conductor_rpcapi.API)
rpcapi = rpcapi_cls(topic='fake-topic')
expected_retval = 'hello world' if rpc_method == 'call' else None
expected_topic = 'fake-topic'
if 'host' in kwargs:
expected_topic += ".%s" % kwargs['host']
target = {
"topic": expected_topic,
"version": kwargs.pop('version', 1.0)
}
expected_msg = copy.deepcopy(kwargs)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwargs):
for kwd in kwargs:
self.assertEqual(target[kwd], kwargs[kwd])
return rpcapi._client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
with mock.patch.object(rpcapi._client, "prepare") as mock_prepared:
mock_prepared.side_effect = _fake_prepare_method
with mock.patch.object(rpcapi._client, rpc_method) as mock_method:
mock_method.side_effect = _fake_rpc_method
retval = getattr(rpcapi, method)(**kwargs)
self.assertEqual(expected_retval, retval)
expected_args = [None, method, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(expected_arg, arg)
def test_bay_create(self):
self._test_rpcapi('bay_create',
'call',
version='1.0',
bay=self.fake_bay,
bay_create_timeout=15)
def test_bay_delete(self):
self._test_rpcapi('bay_delete',
'call',
version='1.0',
uuid=self.fake_bay['uuid'])
self._test_rpcapi('bay_delete',
'call',
version='1.1',
uuid=self.fake_bay['name'])
def test_bay_update(self):
self._test_rpcapi('bay_update',
'call',
version='1.1',
bay=self.fake_bay['name'])
def test_service_create(self):
self._test_rpcapi('service_create',
'call',
version='1.0',
service=self.fake_service)
def test_service_update(self):
self._test_rpcapi('service_update',
'call',
version='1.0',
service_ident=self.fake_service['uuid'],
bay_ident=self.fake_service['bay_uuid'],
manifest={})
def test_service_delete(self):
self._test_rpcapi('service_delete',
'call',
version='1.0',
service_ident=self.fake_service['uuid'],
bay_ident=self.fake_service['bay_uuid'])
self._test_rpcapi('service_delete',
'call',
version='1.1',
service_ident=self.fake_service['uuid'],
bay_ident=self.fake_service['bay_uuid'])
def test_pod_create(self):
self._test_rpcapi('pod_create',
'call',
version='1.0',
pod=self.fake_pod)
def test_pod_update(self):
self._test_rpcapi('pod_update',
'call',
version='1.1',
pod_ident=self.fake_pod['uuid'],
bay_ident=self.fake_pod['bay_uuid'],
manifest={})
def test_pod_delete(self):
self._test_rpcapi('pod_delete',
'call',
version='1.0',
pod_ident=self.fake_pod['uuid'],
bay_ident=self.fake_pod['bay_uuid'])
self._test_rpcapi('pod_delete',
'call',
version='1.1',
pod_ident=self.fake_pod['uuid'],
bay_ident=self.fake_pod['bay_uuid'])
def test_rc_create(self):
self._test_rpcapi('rc_create',
'call',
version='1.0',
rc=self.fake_rc)
def test_rc_update(self):
self._test_rpcapi('rc_update',
'call',
version='1.0',
rc_ident=self.fake_rc['uuid'],
bay_ident=self.fake_rc['bay_uuid'],
manifest={})
def test_rc_delete(self):
self._test_rpcapi('rc_delete',
'call',
version='1.0',
rc_ident=self.fake_rc['uuid'],
bay_ident=self.fake_rc['bay_uuid'])
self._test_rpcapi('rc_delete',
'call',
version='1.1',
rc_ident=self.fake_rc['uuid'],
bay_ident=self.fake_rc['bay_uuid'])
def test_container_create(self):
self._test_rpcapi('container_create',
'call',
version='1.0',
container=self.fake_container)
def test_container_delete(self):
self._test_rpcapi('container_delete',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_show(self):
self._test_rpcapi('container_show',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_reboot(self):
self._test_rpcapi('container_reboot',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_stop(self):
self._test_rpcapi('container_stop',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_start(self):
self._test_rpcapi('container_start',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_pause(self):
self._test_rpcapi('container_pause',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_unpause(self):
self._test_rpcapi('container_unpause',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_logs(self):
self._test_rpcapi('container_logs',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_exec(self):
self._test_rpcapi('container_exec',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'],
command=self.fake_container['command'])
def test_ping_conductor(self):
self._test_rpcapi('ping_conductor',
'call',
rpcapi_cls=conductor_rpcapi.ListenerAPI,
version='1.0')
def test_x509keypair_create(self):
self._test_rpcapi('x509keypair_create',
'call',
version='1.0',
x509keypair=self.fake_x509keypair)
def test_x509keypair_delete(self):
self._test_rpcapi('x509keypair_delete',
'call',
version='1.0',
uuid=self.fake_x509keypair['uuid'])
self._test_rpcapi('x509keypair_delete',
'call',
version='1.1',
uuid=self.fake_x509keypair['name'])
def test_sign_certificate(self):
self._test_rpcapi('sign_certificate',
'call',
version='1.0',
bay=self.fake_bay,
certificate=self.fake_certificate)
def test_get_ca_certificate(self):
self._test_rpcapi('get_ca_certificate',
'call',
version='1.0',
bay=self.fake_bay)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations that generate constants.
See the [constants guide](https://tensorflow.org/api_guides/python/constant_op).
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.util.tf_export import tf_export
def _eager_reshape(tensor, shape, ctx):
"""Eager-only version of Reshape op; requires tensor is an eager Tensor."""
attr_t = tensor._datatype_enum() # pylint: disable=protected-access
attr_tshape, (shape,) = execute.args_to_matching_eager(
[shape], ctx, dtypes.int32)
inputs_flat = [tensor, shape]
attrs = ("T", attr_t, "Tshape", attr_tshape)
result, = execute.execute(
b"Reshape", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_fill(dims, value, ctx):
"""Eager-only version of Fill op; requires value is an eager Tensor."""
attr_t = value.dtype.as_datatype_enum
dims = convert_to_eager_tensor(dims, ctx, dtypes.int32)
inputs_flat = [dims, value]
attrs = ("T", attr_t, "index_type", types_pb2.DT_INT32)
result, = execute.execute(
b"Fill", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_identity(tensor, ctx):
"""Eager-only version of Identity op; requires tensor is an eager Tensor."""
attrs = ("T", tensor.dtype.as_datatype_enum)
result, = execute.execute(
b"Identity", 1, inputs=[tensor], attrs=attrs, ctx=ctx)
return result
def convert_to_eager_tensor(value, ctx, dtype=None):
"""Converts the given `value` to an `EagerTensor`.
Note that this function could return cached copies of created constants for
performance reasons.
Args:
value: value to convert to EagerTensor.
ctx: value of context.context().
dtype: optional desired dtype of the converted EagerTensor.
Returns:
EagerTensor created from value.
Raises:
    TypeError: if `dtype` is not compatible with the type of `value`.
"""
if isinstance(value, ops.EagerTensor):
if dtype is not None and value.dtype != dtype:
raise TypeError("Expected tensor with type %r not %r" % (
dtype, value.dtype))
return value
if dtype is not None:
try:
dtype = dtype.as_datatype_enum
except AttributeError:
dtype = dtypes.as_dtype(dtype).as_datatype_enum
ctx.ensure_initialized()
return ops.EagerTensor(value, ctx.device_name, dtype)
@tf_export(v1=["constant"])
def constant_v1(
value, dtype=None, shape=None, name="Const", verify_shape=False):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions of
the resulting tensor. If not present, the shape of `value` is used.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
`tf.constant` differs from `tf.fill` in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.constant` creates a `Const` node in the computation graph with the
exact value at graph construction time. On the other hand, `tf.fill`
creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does
not support dynamic shapes based on other runtime Tensors, whereas
`tf.fill` does.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=verify_shape,
allow_broadcast=False)
@tf_export("constant", v1=[])
def constant(value, dtype=None, shape=None, name="Const"):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions of
the resulting tensor. If not present, the shape of `value` is used.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6]) => [1 2 3 4 5 6]
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6], shape=(2,3))
=> [[1 2 3], [4 5 6]]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
`tf.constant` differs from `tf.fill` in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.constant` creates a `Const` node in the computation graph with the
exact value at graph construction time. On the other hand, `tf.fill`
creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does
not support dynamic shapes based on other runtime Tensors, whereas
`tf.fill` does.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=False,
allow_broadcast=True)
def _constant_impl(
value, dtype, shape, name, verify_shape, allow_broadcast):
"""Implementation of constant."""
ctx = context.context()
if ctx.executing_eagerly():
t = convert_to_eager_tensor(value, ctx, dtype)
if shape is None:
return t
shape = tensor_shape.as_shape(shape)
if shape == t.shape:
return t
if verify_shape:
raise TypeError("Expected Tensor's shape: %s, got %s." % (tuple(shape),
tuple(t.shape)))
num_t = t.shape.num_elements()
# TODO(josh11b): Implement shape -> eager tensor conversion.
if num_t == shape.num_elements():
return _eager_reshape(t, shape.as_list(), ctx)
if num_t == 1:
if t.dtype == dtypes.bool:
# We don't have a Fill kernel for bool dtype on GPU. So we first run
# Fill on CPU and then copy to GPU if needed.
with ops.device("/device:CPU:0"):
x = _eager_fill(shape.as_list(), _eager_identity(t, ctx), ctx)
return _eager_identity(x, ctx)
else:
return _eager_fill(shape.as_list(), t, ctx)
raise TypeError("Eager execution of tf.constant with unsupported shape "
"(value has %d elements, shape is %s with %d elements)." %
(num_t, shape, shape.num_elements()))
g = ops.get_default_graph()
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape, verify_shape=verify_shape,
allow_broadcast=allow_broadcast))
dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
const_tensor = g._create_op_internal( # pylint: disable=protected-access
"Const", [], [dtype_value.type],
attrs={"value": tensor_value,
"dtype": dtype_value},
name=name).outputs[0]
return const_tensor
def is_constant(tensor_or_op):
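  """Returns True if `tensor_or_op` is (the output of) a Const op."""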
if isinstance(tensor_or_op, ops.Tensor):
op = tensor_or_op.op
else:
op = tensor_or_op
return op.type == "Const"
def _constant_tensor_conversion_function(v, dtype=None, name=None,
as_ref=False):
_ = as_ref
return constant(v, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
(list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s,
dtype=None,
name=None,
as_ref=False):
"""Function to convert TensorShape to Tensor."""
_ = as_ref
if not s.is_fully_defined():
raise ValueError(
"Cannot convert a partially known TensorShape to a Tensor: %s" % s)
s_list = s.as_list()
int64_value = 0
for dim in s_list:
if dim >= 2**31:
int64_value = dim
break
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
if dtype == dtypes.int32 and int64_value:
raise ValueError("Cannot convert a TensorShape to dtype int32; "
"a dimension is too large (%s)" % int64_value)
else:
dtype = dtypes.int64 if int64_value else dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(s_list, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
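# With the registration above, a fully defined TensorShape is accepted
# wherever a Tensor is expected. Illustratively (not from the original
# source):
#
#   ops.convert_to_tensor(tensor_shape.TensorShape([2, 3]))
#   # -> an int32 Tensor of shape (2,) holding [2, 3], named "shape_as_tensor"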
def _dimension_tensor_conversion_function(d,
dtype=None,
name=None,
as_ref=False):
"""Function to convert Dimension to Tensor."""
_ = as_ref
if d.value is None:
raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
else:
dtype = dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(d.value, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module implementing classes and functions to use Zeo++
by Maciej Haranczyk.
If using this module, cite the following paper on Zeo++:
T.F. Willems, C.H. Rycroft, M. Kazi, J.C. Meza, and M. Haranczyk,
Algorithms and tools for high-throughput geometry-based analysis of crystalline porous materials,
Microporous and Mesoporous Materials, 149 (2012) 134-141.
Zeo++ Installation Steps:
========================
A stable version of Zeo++ can be obtained from http://zeoplusplus.org.
Instructions can be found at http://www.zeoplusplus.org/download.html
Zeo++ Post-Installation Checking:
==============================
1) Go to pymatgen/io/tests and run "python test_zeoio.py"
If Zeo++ python bindings are properly installed, the tests should
pass. One or two tests will be skipped.
2) Go to pymatgen/analysis/defects/tests and run
"python test_point_defects.py". Lots of tests will be skipped if GULP
is not installed. But there should be no errors.
"""
import os
import re
from monty.dev import requires
from monty.io import zopen
from monty.tempfile import ScratchDir
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
try:
from zeo.area_volume import surface_area, volume
from zeo.cluster import prune_voronoi_network_close_node
from zeo.netstorage import AtomNetwork
zeo_found = True
except ImportError:
zeo_found = False
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "[email protected]"
__data__ = "Aug 2, 2013"
class ZeoCssr(Cssr):
"""
ZeoCssr adds extra fields to CSSR sites to conform with Zeo++
input CSSR format. The coordinate system is rotated from xyz to zyx.
    This change aligns the pivot axis of pymatgen (z-axis) to the pivot axis
    of Zeo++ (x-axis) for structural modifications.
"""
def __init__(self, structure):
"""
Args:
structure: A structure to create ZeoCssr object
"""
super().__init__(structure)
def __str__(self):
"""
        CSSR.__str__ method is modified to pad 0's to the CSSR site data.
        The padding is to conform with the CSSR format supported by Zeo++.
        The oxidation state is stripped from site.specie.
        Also, the coordinate system is rotated from xyz to zxy.
"""
output = [
"{:.4f} {:.4f} {:.4f}".format(
self.structure.lattice.c,
self.structure.lattice.a,
self.structure.lattice.b,
),
"{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1".format(
self.structure.lattice.gamma,
self.structure.lattice.alpha,
self.structure.lattice.beta,
),
f"{len(self.structure)} 0",
f"0 {self.structure.formula}",
]
for i, site in enumerate(self.structure.sites):
# if not hasattr(site, 'charge'):
# charge = 0
# else:
# charge = site.charge
charge = site.charge if hasattr(site, "charge") else 0
# specie = site.specie.symbol
specie = site.species_string
output.append(
"{} {} {:.4f} {:.4f} {:.4f} 0 0 0 0 0 0 0 0 {:.4f}".format(
i + 1, specie, site.c, site.a, site.b, charge
)
)
return "\n".join(output)
@staticmethod
def from_string(string):
"""
Reads a string representation to a ZeoCssr object.
Args:
string: A string representation of a ZeoCSSR.
Returns:
ZeoCssr object.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
a = lengths.pop(-1)
lengths.insert(0, a)
alpha = angles.pop(-1)
angles.insert(0, alpha)
latt = Lattice.from_parameters(*lengths, *angles)
sp = []
coords = []
chrg = []
for l in lines[4:]:
m = re.match(
r"\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" + r"([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)",
l.strip(),
)
if m:
sp.append(m.group(1))
# coords.append([float(m.group(i)) for i in xrange(2, 5)])
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
coords.append([float(m.group(i)) for i in [3, 4, 2]])
chrg.append(m.group(5))
return ZeoCssr(Structure(latt, sp, coords, site_properties={"charge": chrg}))
@staticmethod
def from_file(filename):
"""
Reads a CSSR file to a ZeoCssr object.
Args:
filename: Filename to read from.
Returns:
ZeoCssr object.
"""
with zopen(filename, "r") as f:
return ZeoCssr.from_string(f.read())
class ZeoVoronoiXYZ(XYZ):
"""
Class to read Voronoi Nodes from XYZ file written by Zeo++.
The sites have an additional column representing the voronoi node radius.
The voronoi node radius is represented by the site property voronoi_radius.
"""
def __init__(self, mol):
"""
Args:
mol: Input molecule holding the voronoi node information
"""
super().__init__(mol)
@staticmethod
def from_string(contents):
"""
Creates Zeo++ Voronoi XYZ object from a string.
from_string method of XYZ class is being redefined.
Args:
contents: String representing Zeo++ Voronoi XYZ file.
Returns:
ZeoVoronoiXYZ object
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
prop = []
coord_patt = re.compile(r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" + r"([0-9\-\.]+)")
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# coords.append(map(float, m.groups()[1:4])) # this is 0-indexed
coords.append([float(j) for j in [m.group(i) for i in [3, 4, 2]]])
prop.append(float(m.group(5)))
return ZeoVoronoiXYZ(Molecule(sp, coords, site_properties={"voronoi_radius": prop}))
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return ZeoVoronoiXYZ.from_string(f.read())
def __str__(self):
output = [str(len(self._mols[0])), self._mols[0].composition.formula]
fmtstr = f"{{}} {{:.{self.precision}f}} {{:.{self.precision}f}} {{:.{self.precision}f}} {{:.{self.precision}f}}"
for site in self._mols[0]:
output.append(
fmtstr.format(
site.specie.symbol,
site.z,
site.x,
site.y,
site.properties["voronoi_radius"],
)
)
return "\n".join(output)
@requires(
zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.",
)
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
        voronoi nodes, voronoi edge centers and voronoi face centers, each as
        a pymatgen.core.structure.Structure within the unit cell defined by
        the lattice of the input structure
"""
with ScratchDir("."):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, "w+") as fp:
for el in rad_dict.keys():
fp.write(f"{el} {rad_dict[el].real}\n")
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
(
vornet,
vor_edge_centers,
vor_face_centers,
) = atmnet.perform_voronoi_decomposition()
vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + "_voro.xyz"
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties["voronoi_radius"])
lattice = Lattice.from_parameters(*structure.lattice.parameters)
vor_node_struct = Structure(
lattice,
species,
coords,
coords_are_cartesian=True,
to_unit_cell=True,
site_properties={"voronoi_radius": prop},
)
# PMG-Zeo c<->a transformation for voronoi face centers
rot_face_centers = [(center[1], center[2], center[0]) for center in vor_face_centers]
rot_edge_centers = [(center[1], center[2], center[0]) for center in vor_edge_centers]
species = ["X"] * len(rot_face_centers)
prop = [0.0] * len(rot_face_centers) # Vor radius not evaluated for fc
vor_facecenter_struct = Structure(
lattice,
species,
rot_face_centers,
coords_are_cartesian=True,
to_unit_cell=True,
site_properties={"voronoi_radius": prop},
)
species = ["X"] * len(rot_edge_centers)
prop = [0.0] * len(rot_edge_centers) # Vor radius not evaluated for fc
vor_edgecenter_struct = Structure(
lattice,
species,
rot_edge_centers,
coords_are_cartesian=True,
to_unit_cell=True,
site_properties={"voronoi_radius": prop},
)
return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
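# Illustrative call pattern (not part of the original module); it assumes the
# Zeo++ bindings are installed, that ``struct`` is a pymatgen Structure, and
# that the radii below are hypothetical ionic radii in Angstrom:
#
#     nodes, edge_centers, face_centers = get_voronoi_nodes(
#         struct, rad_dict={"Li": 0.76, "O": 1.40}, probe_rad=0.1)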
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
        rad_dict: Dictionary of radii of elements in structure (required
            here, unlike in get_voronoi_nodes).
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii.
probe_rad (optional): Sampling probe radius in Angstroms.
Default is 0.1 A
Returns:
        voronoi nodes as a pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of the input structure
"""
with ScratchDir("."):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, "w+") as fp:
for el in rad_dict.keys():
print(f"{el} {rad_dict[el].real}", file=fp)
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
# vornet, vor_edge_centers, vor_face_centers = \
# atmnet.perform_voronoi_decomposition()
red_ha_vornet = prune_voronoi_network_close_node(atmnet)
# generate_simplified_highaccuracy_voronoi_network(atmnet)
# get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + "_voro.xyz"
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties["voronoi_radius"])
lattice = Lattice.from_parameters(*structure.lattice.parameters)
vor_node_struct = Structure(
lattice,
species,
coords,
coords_are_cartesian=True,
to_unit_cell=True,
site_properties={"voronoi_radius": prop},
)
return vor_node_struct
@requires(
zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.",
)
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
        Dictionary with keys "inc_sph_max_dia", "free_sph_max_dia" and
        "inc_sph_along_free_sph_path_max_dia" holding the corresponding
        sphere diameters computed by Zeo++
"""
with ScratchDir("."):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, "w+") as fp:
for el in rad_dict.keys():
fp.write(f"{el} {rad_dict[el].real}\n")
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
out_file = "temp.res"
atmnet.calculate_free_sphere_parameters(out_file)
if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
with open(out_file) as fp:
output = fp.readline()
else:
output = ""
fields = [val.strip() for val in output.split()][1:4]
if len(fields) == 3:
fields = [float(field) for field in fields]
free_sphere_params = {
"inc_sph_max_dia": fields[0],
"free_sph_max_dia": fields[1],
"inc_sph_along_free_sph_path_max_dia": fields[2],
}
return free_sphere_params
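# Hedged usage sketch (not part of the original module): assumes the Zeo++
# bindings are importable and that `structure` is a pymatgen Structure. The
# ionic radii below are illustrative placeholders, not reference values.
def _example_free_sphere_usage(structure):
    rad_dict = {"Li": 0.90, "O": 1.26}  # illustrative ionic radii in Angstrom
    params = get_free_sphere_params(structure, rad_dict=rad_dict, probe_rad=0.1)
    # The returned dict holds the three characteristic pore diameters.
    return params["free_sph_max_dia"]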
# Deprecated. Not needed anymore
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1):
"""
Computes the volume and surface area of isolated void using Zeo++.
Useful to compute the volume and surface area of vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
(volume, surface_area): floats giving the volume and surface area of the void
"""
with ScratchDir("."):
name = "temp_zeo"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = name + ".rad"
with open(rad_file, "w") as fp:
for el in rad_dict.keys():
fp.write(f"{el} {rad_dict[el]}")
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, chan_rad, probe_rad, 10000)
sa_str = surface_area(atmnet, chan_rad, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
vol = -1.0
break
if float(fields[1]) == 0:
vol = -1.0
break
vol = float(fields[3])
for line in sa_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
# raise ValueError("Too many voids")
sa = -1.0
break
if float(fields[1]) == 0:
sa = -1.0
break
sa = float(fields[3])
if not vol or not sa:
raise ValueError("Error in zeo++ output stream")
return vol, sa
|
|
import pytest
from django.contrib.auth.models import Permission, Group
from core.models import Event, User
from pictures.models import StockPicture
from sponsor.models import Donor
@pytest.fixture(autouse=True)
def enable_db_access_for_all_tests(db):
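    """Autouse fixture: give every test database access without an explicit ``db`` marker."""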
pass
@pytest.fixture()
def user(db, django_user_model, django_username_field):
"""This is a copy from pytest-django prepared for usage with e-mail instead
of username.
"""
UserModel = django_user_model
username_field = django_username_field
try:
user = UserModel._default_manager.get(**{username_field: '[email protected]'})
except UserModel.DoesNotExist:
extra_fields = {}
user = UserModel._default_manager.create_user(
'[email protected]', 'password', **extra_fields)
return user
@pytest.fixture()
def admin_user(db, django_user_model, django_username_field):
"""This is a copy from pytest-django prepared for usage with e-mail instead
of username.
"""
UserModel = django_user_model
username_field = django_username_field
try:
user = UserModel._default_manager.get(**{username_field: '[email protected]'})
except UserModel.DoesNotExist:
extra_fields = {}
user = UserModel._default_manager.create_superuser(
'[email protected]', 'password', **extra_fields)
return user
@pytest.fixture()
def admin_client(db, admin_user):
"""A Django test client logged in as an admin user."""
from django.test.client import Client
client = Client()
client.force_login(admin_user)
return client
@pytest.fixture()
def user_client(db, user):
"""A Django test client logged in as an user."""
from django.test.client import Client
client = Client()
client.force_login(user)
return client
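# Hedged usage sketch (not part of the original fixtures): how the client
# fixtures above are typically consumed in a test module. The URL and the
# expected status code are illustrative assumptions, not project facts.
def _example_admin_can_open_admin_index(admin_client):
    response = admin_client.get("/admin/")  # hypothetical URL
    assert response.status_code == 200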
@pytest.fixture()
def organizers_group():
add_event_permission = Permission.objects.get(codename='add_event')
change_event_permission = Permission.objects.get(codename='change_event')
group = Group.objects.create(name="Organizers")
group.permissions.set([add_event_permission, change_event_permission])
return group
@pytest.fixture()
def superuser():
return User.objects.create(
first_name="Super",
last_name="Girl",
email="[email protected]",
is_active=True,
is_superuser=True,
is_staff=True)
@pytest.fixture()
def organizer_peter(organizers_group):
user = User.objects.create(
first_name="Peter",
last_name="Pan",
email="[email protected]",
password="",
is_active=True,
is_superuser=False,
is_staff=True)
user.groups.add(organizers_group)
return user
@pytest.fixture()
def organizer_julia(organizers_group):
user = User.objects.create(
first_name="Julia",
last_name="Ailuj",
email="[email protected]",
password="",
is_active=True,
is_superuser=False,
is_staff=True)
user.groups.add(organizers_group)
return user
@pytest.fixture()
def future_event(organizer_peter):
event = Event.objects.create(
email="[email protected]",
city="Bonn",
name="Django Girls Bonn",
country="the Neverlands",
is_on_homepage=True,
main_organizer=organizer_peter,
date="2080-01-01",
page_url="bonn",
is_page_live=True)
event.team.add(organizer_peter)
return event
@pytest.fixture()
def past_event(organizer_peter):
event = Event.objects.create(
email="[email protected]",
city="Rome",
name="Django Girls Rome",
country="Italy",
latlng="41.8933203, 12.4829321",
is_on_homepage=True,
main_organizer=organizer_peter,
date="2013-10-12",
page_url="rome",
is_page_live=True)
event.team.add(organizer_peter)
return event
@pytest.fixture()
def hidden_event(superuser):
event = Event.objects.create(
email="[email protected]",
city="Rome",
name="Django Girls Rome",
country="Italy",
is_on_homepage=False,
main_organizer=superuser,
date="2080-09-02",
page_url="rome",
is_page_live=False)
event.team.add(superuser)
return event
@pytest.fixture()
def diff_url_event(superuser):
event = Event.objects.create(
email="[email protected]",
city="Foo",
name="Django Girls Foo",
country="Italy",
is_on_homepage=False,
main_organizer=superuser,
date="2080-09-02",
page_url="bar",
is_page_live=False
)
event.team.add(superuser)
return event
@pytest.fixture()
def no_date_event(superuser):
event = Event.objects.create(
email="[email protected]",
city="Venice",
name="Django Girls Venice",
country="Italy",
is_on_homepage=False,
main_organizer=superuser,
page_url="venice",
is_page_live=False)
event.team.add(superuser)
return event
@pytest.fixture()
def events(future_event, past_event, hidden_event, no_date_event):
return [future_event, past_event, hidden_event, no_date_event]
@pytest.fixture()
def stock_pictures():
StockPicture.objects.bulk_create([
StockPicture(
photo="stock_pictures/city_one.jpg",
photo_credit="Someone",
photo_link="https://djangogirls.org",
kind=StockPicture.COVER),
StockPicture(
photo="stock_pictures/city_two.jpg",
photo_credit="Someone Else",
photo_link="https://djangogirls.org",
kind=StockPicture.COVER)])
@pytest.fixture()
def visible_donors():
donors = Donor.objects.bulk_create([
Donor(name="Ola", amount=50, visible=True),
Donor(name="Aisha", amount=50, visible=True),
Donor(name="Claire", amount=20, visible=True),
Donor(name="Rachel", amount=100, visible=True)
])
return donors
@pytest.fixture()
def hidden_donors():
hidden_donors = Donor.objects.bulk_create([
Donor(name="Gift", amount=20, visible=False),
Donor(name="Anna", amount=10, visible=False),
Donor(name="Matthew", amount=50, visible=False),
Donor(name="Tanaka", amount=100, visible=False)
])
return hidden_donors
|
|
"""This nodule defines helper functions to deal with native calls."""
import hashlib
import logging
from typing import List, Union
from ethereum.utils import ecrecover_to_pub
from py_ecc.secp256k1 import N as secp256k1n
import py_ecc.optimized_bn128 as bn128
from rlp.utils import ALL_BYTES
from mythril.laser.ethereum.state.calldata import BaseCalldata, ConcreteCalldata
from mythril.laser.ethereum.util import extract_copy, extract32
from ethereum.utils import (
sha3,
big_endian_to_int,
safe_ord,
zpad,
int_to_big_endian,
encode_int32,
)
from ethereum.specials import validate_point
log = logging.getLogger(__name__)
class NativeContractException(Exception):
"""An exception denoting an error during a native call."""
pass
def ecrecover(data: List[int]) -> List[int]:
"""
:param data: 128 bytes of call data laid out as four 32-byte words: hash, v, r, s.
:return: the recovered address left-padded to 32 bytes, as a list of ints,
    or an empty list if recovery fails.
"""
# TODO: Add type hints
try:
bytes_data = bytearray(data)
v = extract32(bytes_data, 32)
r = extract32(bytes_data, 64)
s = extract32(bytes_data, 96)
except TypeError:
raise NativeContractException
message = b"".join([ALL_BYTES[x] for x in bytes_data[0:32]])
if r >= secp256k1n or s >= secp256k1n or v < 27 or v > 28:
return []
try:
pub = ecrecover_to_pub(message, v, r, s)
except Exception as e:
log.debug("An error has occured while extracting public key: " + str(e))
return []
o = [0] * 12 + [x for x in sha3(pub)[-20:]]
return list(bytearray(o))
def sha256(data: List[int]) -> List[int]:
"""
:param data: call data bytes as a list of ints.
:return: the 32-byte SHA-256 digest as a list of ints.
"""
try:
bytes_data = bytes(data)
except TypeError:
raise NativeContractException
return list(bytearray(hashlib.sha256(bytes_data).digest()))
def ripemd160(data: List[int]) -> List[int]:
"""
:param data: call data bytes as a list of ints.
:return: the 20-byte RIPEMD-160 digest left-padded to 32 bytes, as a list of ints.
"""
try:
bytes_data = bytes(data)
except TypeError:
raise NativeContractException
digest = hashlib.new("ripemd160", bytes_data).digest()
padded = 12 * [0] + list(digest)
return list(bytearray(bytes(padded)))
def identity(data: List[int]) -> List[int]:
"""
:param data: call data bytes as a list of ints.
:return: the input data, unchanged.
"""
# Group up into an array of 32 byte words instead
# of an array of bytes. If saved to memory, 32 byte
# words are currently needed, but a correct memory
# implementation would be byte indexed for the most
# part.
return data
def mod_exp(data: List[int]) -> List[int]:
"""
TODO: Some symbolic parts can be handled here
Modular Exponentiation
:param data: Data with <length_of_BASE> <length_of_EXPONENT> <length_of_MODULUS> <BASE> <EXPONENT> <MODULUS>
:return: modular exponentiation
"""
bytes_data = bytearray(data)
baselen = extract32(bytes_data, 0)
explen = extract32(bytes_data, 32)
modlen = extract32(bytes_data, 64)
if baselen == 0:
return [0] * modlen
if modlen == 0:
return []
first_exp_bytes = extract32(bytes_data, 96 + baselen) >> (8 * max(32 - explen, 0))
while first_exp_bytes:
first_exp_bytes >>= 1
base = bytearray(baselen)
extract_copy(bytes_data, base, 0, 96, baselen)
exp = bytearray(explen)
extract_copy(bytes_data, exp, 0, 96 + baselen, explen)
mod = bytearray(modlen)
extract_copy(bytes_data, mod, 0, 96 + baselen + explen, modlen)
if big_endian_to_int(mod) == 0:
return [0] * modlen
o = pow(big_endian_to_int(base), big_endian_to_int(exp), big_endian_to_int(mod))
return [safe_ord(x) for x in zpad(int_to_big_endian(o), modlen)]
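# Hedged worked example (not part of the original module): MODEXP call data is
# three 32-byte big-endian length words followed by the base, exponent and
# modulus bytes. With lengths (1, 1, 1) and the bytes 3, 5, 7 this computes
# 3 ** 5 mod 7, so the call below should return [5] (assuming extract32
# zero-pads reads past the end of the data, as in the reference client).
def _example_mod_exp():
    data = [0] * 31 + [1] + [0] * 31 + [1] + [0] * 31 + [1] + [3, 5, 7]
    return mod_exp(data)  # -> [5]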
def ec_add(data: List[int]) -> List[int]:
bytes_data = bytearray(data)
x1 = extract32(bytes_data, 0)
y1 = extract32(bytes_data, 32)
x2 = extract32(bytes_data, 64)
y2 = extract32(bytes_data, 96)
p1 = validate_point(x1, y1)
p2 = validate_point(x2, y2)
if p1 is False or p2 is False:
return []
o = bn128.normalize(bn128.add(p1, p2))
return [safe_ord(x) for x in (encode_int32(o[0].n) + encode_int32(o[1].n))]
def ec_mul(data: List[int]) -> List[int]:
bytes_data = bytearray(data)
x = extract32(bytes_data, 0)
y = extract32(bytes_data, 32)
m = extract32(bytes_data, 64)
p = validate_point(x, y)
if p is False:
return []
o = bn128.normalize(bn128.multiply(p, m))
return [safe_ord(c) for c in (encode_int32(o[0].n) + encode_int32(o[1].n))]
def ec_pair(data: List[int]) -> List[int]:
if len(data) % 192:
return []
zero = (bn128.FQ2.one(), bn128.FQ2.one(), bn128.FQ2.zero())
exponent = bn128.FQ12.one()
bytes_data = bytearray(data)
for i in range(0, len(bytes_data), 192):
x1 = extract32(bytes_data, i)
y1 = extract32(bytes_data, i + 32)
x2_i = extract32(bytes_data, i + 64)
x2_r = extract32(bytes_data, i + 96)
y2_i = extract32(bytes_data, i + 128)
y2_r = extract32(bytes_data, i + 160)
p1 = validate_point(x1, y1)
if p1 is False:
return []
for v in (x2_i, x2_r, y2_i, y2_r):
if v >= bn128.field_modulus:
return []
fq2_x = bn128.FQ2([x2_r, x2_i])
fq2_y = bn128.FQ2([y2_r, y2_i])
if (fq2_x, fq2_y) != (bn128.FQ2.zero(), bn128.FQ2.zero()):
p2 = (fq2_x, fq2_y, bn128.FQ2.one())
if not bn128.is_on_curve(p2, bn128.b2):
return []
else:
p2 = zero
if bn128.multiply(p2, bn128.curve_order)[-1] != bn128.FQ2.zero():
return []
exponent *= bn128.pairing(p2, p1, final_exponentiate=False)
result = bn128.final_exponentiate(exponent) == bn128.FQ12.one()
return [0] * 31 + [1 if result else 0]
PRECOMPILE_FUNCTIONS = (
ecrecover,
sha256,
ripemd160,
identity,
mod_exp,
ec_add,
ec_mul,
ec_pair,
)
PRECOMPILE_COUNT = len(PRECOMPILE_FUNCTIONS)
def native_contracts(address: int, data: BaseCalldata) -> List[int]:
"""Takes integer address 1, 2, 3, 4.
:param address:
:param data:
:return:
"""
if isinstance(data, ConcreteCalldata):
concrete_data = data.concrete(None)
else:
raise NativeContractException()
return PRECOMPILE_FUNCTIONS[address - 1](concrete_data)
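# Hedged usage sketch (not part of the original module): precompile addresses
# map to PRECOMPILE_FUNCTIONS by index, so address 2 is SHA-256. Calling the
# function directly sidesteps building a ConcreteCalldata object, whose
# constructor is not shown here.
def _example_sha256_precompile():
    payload = list(b"abc")
    return PRECOMPILE_FUNCTIONS[2 - 1](payload)  # 32-byte digest as a list of ints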
|
|
# -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Micro Document Object Model: a partial DOM implementation with SUX.
This is an implementation of what we consider to be the useful subset of the
DOM. The chief advantage of this library is that, not being burdened with
standards compliance, it can remain very stable between versions. We can also
implement utility 'pythonic' ways to access and mutate the XML tree.
Since this has not been subjected to a serious trial by fire, it is not recommended
to use this outside of Twisted applications. However, it seems to work just
fine for the documentation generator, which parses a fairly representative
sample of XML.
Microdom mainly focuses on working with HTML and XHTML.
"""
# System Imports
import re
from cStringIO import StringIO
from types import StringTypes, UnicodeType
# Twisted Imports
from twisted.python.compat import range
from twisted.python.util import InsensitiveDict
from twisted.web.sux import XMLParser, ParseError
def getElementsByTagName(iNode, name):
"""
Return a list of all child elements of C{iNode} with a name matching
C{name}.
Note that this implementation does not conform to the DOM Level 1 Core
specification because it may return C{iNode}.
@param iNode: An element at which to begin searching. If C{iNode} has a
name matching C{name}, it will be included in the result.
@param name: A C{str} giving the name of the elements to return.
@return: A C{list} of direct or indirect child elements of C{iNode} with
the name C{name}. This may include C{iNode}.
"""
matches = []
matches_append = matches.append # faster lookup. don't do this at home
slice = [iNode]
while len(slice)>0:
c = slice.pop(0)
if c.nodeName == name:
matches_append(c)
slice[:0] = c.childNodes
return matches
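# Hedged usage sketch (not part of the original module): the starting element
# itself can be part of the result, which is why this helper is documented as
# not conforming to DOM Level 1 Core.
def _exampleGetElementsByTagName():
    doc = parseString("<div><div><span>x</span></div></div>")
    # Returns both <div> elements, including the one the search started from.
    return getElementsByTagName(doc.documentElement, "div")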
def getElementsByTagNameNoCase(iNode, name):
name = name.lower()
matches = []
matches_append = matches.append
slice=[iNode]
while len(slice)>0:
c = slice.pop(0)
if c.nodeName.lower() == name:
matches_append(c)
slice[:0] = c.childNodes
return matches
# order is important
HTML_ESCAPE_CHARS = (('&', '&'), # don't add any entities before this one
('<', '<'),
('>', '>'),
('"', '"'))
REV_HTML_ESCAPE_CHARS = list(HTML_ESCAPE_CHARS)
REV_HTML_ESCAPE_CHARS.reverse()
XML_ESCAPE_CHARS = HTML_ESCAPE_CHARS + (("'", '''),)
REV_XML_ESCAPE_CHARS = list(XML_ESCAPE_CHARS)
REV_XML_ESCAPE_CHARS.reverse()
def unescape(text, chars=REV_HTML_ESCAPE_CHARS):
"Perform the exact opposite of 'escape'."
for s, h in chars:
text = text.replace(h, s)
return text
def escape(text, chars=HTML_ESCAPE_CHARS):
"Escape a few XML special chars with XML entities."
for s, h in chars:
text = text.replace(s, h)
return text
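# Hedged round-trip sketch (not part of the original module), using the default
# HTML escape table defined above.
def _exampleEscapeRoundTrip():
    raw = '<a href="x">&</a>'
    return unescape(escape(raw)) == raw  # -> True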
class MismatchedTags(Exception):
def __init__(self, filename, expect, got, endLine, endCol, begLine, begCol):
(self.filename, self.expect, self.got, self.begLine, self.begCol, self.endLine,
self.endCol) = filename, expect, got, begLine, begCol, endLine, endCol
def __str__(self):
return ("expected </%s>, got </%s> line: %s col: %s, began line: %s col: %s"
% (self.expect, self.got, self.endLine, self.endCol, self.begLine,
self.begCol))
class Node(object):
nodeName = "Node"
def __init__(self, parentNode=None):
self.parentNode = parentNode
self.childNodes = []
def isEqualToNode(self, other):
"""
Compare this node to C{other}. If the nodes have the same number of
children and corresponding children are equal to each other, return
C{True}, otherwise return C{False}.
@type other: L{Node}
@rtype: C{bool}
"""
if len(self.childNodes) != len(other.childNodes):
return False
for a, b in zip(self.childNodes, other.childNodes):
if not a.isEqualToNode(b):
return False
return True
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
raise NotImplementedError()
def toxml(self, indent='', addindent='', newl='', strip=0, nsprefixes={},
namespace=''):
s = StringIO()
self.writexml(s, indent, addindent, newl, strip, nsprefixes, namespace)
rv = s.getvalue()
return rv
def writeprettyxml(self, stream, indent='', addindent=' ', newl='\n', strip=0):
return self.writexml(stream, indent, addindent, newl, strip)
def toprettyxml(self, indent='', addindent=' ', newl='\n', strip=0):
return self.toxml(indent, addindent, newl, strip)
def cloneNode(self, deep=0, parent=None):
raise NotImplementedError()
def hasChildNodes(self):
if self.childNodes:
return 1
else:
return 0
def appendChild(self, child):
"""
Make the given L{Node} the last child of this node.
@param child: The L{Node} which will become a child of this node.
@raise TypeError: If C{child} is not a C{Node} instance.
"""
if not isinstance(child, Node):
raise TypeError("expected Node instance")
self.childNodes.append(child)
child.parentNode = self
def insertBefore(self, new, ref):
"""
Make the given L{Node} C{new} a child of this node which comes before
the L{Node} C{ref}.
@param new: A L{Node} which will become a child of this node.
@param ref: A L{Node} which is already a child of this node which
C{new} will be inserted before.
@raise TypeError: If C{new} or C{ref} is not a C{Node} instance.
@return: C{new}
"""
if not isinstance(new, Node) or not isinstance(ref, Node):
raise TypeError("expected Node instance")
i = self.childNodes.index(ref)
new.parentNode = self
self.childNodes.insert(i, new)
return new
def removeChild(self, child):
"""
Remove the given L{Node} from this node's children.
@param child: A L{Node} which is a child of this node which will no
longer be a child of this node after this method is called.
@raise TypeError: If C{child} is not a C{Node} instance.
@return: C{child}
"""
if not isinstance(child, Node):
raise TypeError("expected Node instance")
if child in self.childNodes:
self.childNodes.remove(child)
child.parentNode = None
return child
def replaceChild(self, newChild, oldChild):
"""
Replace a L{Node} which is already a child of this node with a
different node.
@param newChild: A L{Node} which will be made a child of this node.
@param oldChild: A L{Node} which is a child of this node which will
give up its position to C{newChild}.
@raise TypeError: If C{newChild} or C{oldChild} is not a C{Node}
instance.
@raise ValueError: If C{oldChild} is not a child of this C{Node}.
"""
if not isinstance(newChild, Node) or not isinstance(oldChild, Node):
raise TypeError("expected Node instance")
if oldChild.parentNode is not self:
raise ValueError("oldChild is not a child of this node")
self.childNodes[self.childNodes.index(oldChild)] = newChild
oldChild.parentNode = None
newChild.parentNode = self
def lastChild(self):
return self.childNodes[-1]
def firstChild(self):
if len(self.childNodes):
return self.childNodes[0]
return None
#def get_ownerDocument(self):
# """This doesn't really get the owner document; microdom nodes
# don't even have one necessarily. This gets the root node,
# which is usually what you really meant.
# *NOT DOM COMPLIANT.*
# """
# node=self
# while (node.parentNode): node=node.parentNode
# return node
#ownerDocument=node.get_ownerDocument()
# leaving commented for discussion; see also domhelpers.getParents(node)
class Document(Node):
def __init__(self, documentElement=None):
Node.__init__(self)
if documentElement:
self.appendChild(documentElement)
def cloneNode(self, deep=0, parent=None):
d = Document()
d.doctype = self.doctype
if deep:
newEl = self.documentElement.cloneNode(1, self)
else:
newEl = self.documentElement
d.appendChild(newEl)
return d
doctype = None
def isEqualToDocument(self, n):
return (self.doctype == n.doctype) and Node.isEqualToNode(self, n)
isEqualToNode = isEqualToDocument
def get_documentElement(self):
return self.childNodes[0]
documentElement=property(get_documentElement)
def appendChild(self, child):
"""
Make the given L{Node} the I{document element} of this L{Document}.
@param child: The L{Node} to make into this L{Document}'s document
element.
@raise ValueError: If this document already has a document element.
"""
if self.childNodes:
raise ValueError("Only one element per document.")
Node.appendChild(self, child)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write('<?xml version="1.0"?>' + newl)
if self.doctype:
stream.write("<!DOCTYPE "+self.doctype+">" + newl)
self.documentElement.writexml(stream, indent, addindent, newl, strip,
nsprefixes, namespace)
# of dubious utility (?)
def createElement(self, name, **kw):
return Element(name, **kw)
def createTextNode(self, text):
return Text(text)
def createComment(self, text):
return Comment(text)
def getElementsByTagName(self, name):
if self.documentElement.caseInsensitive:
return getElementsByTagNameNoCase(self, name)
return getElementsByTagName(self, name)
def getElementById(self, id):
childNodes = self.childNodes[:]
while childNodes:
node = childNodes.pop(0)
if node.childNodes:
childNodes.extend(node.childNodes)
if hasattr(node, 'getAttribute') and node.getAttribute("id") == id:
return node
class EntityReference(Node):
def __init__(self, eref, parentNode=None):
Node.__init__(self, parentNode)
self.eref = eref
self.nodeValue = self.data = "&" + eref + ";"
def isEqualToEntityReference(self, n):
if not isinstance(n, EntityReference):
return 0
return (self.eref == n.eref) and (self.nodeValue == n.nodeValue)
isEqualToNode = isEqualToEntityReference
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write(self.nodeValue)
def cloneNode(self, deep=0, parent=None):
return EntityReference(self.eref, parent)
class CharacterData(Node):
def __init__(self, data, parentNode=None):
Node.__init__(self, parentNode)
self.value = self.data = self.nodeValue = data
def isEqualToCharacterData(self, n):
return self.value == n.value
isEqualToNode = isEqualToCharacterData
class Comment(CharacterData):
"""A comment node."""
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
val=self.data
if isinstance(val, UnicodeType):
val=val.encode('utf8')
stream.write("<!--%s-->" % val)
def cloneNode(self, deep=0, parent=None):
return Comment(self.nodeValue, parent)
class Text(CharacterData):
def __init__(self, data, parentNode=None, raw=0):
CharacterData.__init__(self, data, parentNode)
self.raw = raw
def isEqualToNode(self, other):
"""
Compare this text to C{text}. If the underlying values and the C{raw}
flag are the same, return C{True}, otherwise return C{False}.
"""
return (
CharacterData.isEqualToNode(self, other) and
self.raw == other.raw)
def cloneNode(self, deep=0, parent=None):
return Text(self.nodeValue, parent, self.raw)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
if self.raw:
val = self.nodeValue
if not isinstance(val, StringTypes):
val = str(self.nodeValue)
else:
v = self.nodeValue
if not isinstance(v, StringTypes):
v = str(v)
if strip:
v = ' '.join(v.split())
val = escape(v)
if isinstance(val, UnicodeType):
val = val.encode('utf8')
stream.write(val)
def __repr__(self):
return "Text(%s" % repr(self.nodeValue) + ')'
class CDATASection(CharacterData):
def cloneNode(self, deep=0, parent=None):
return CDATASection(self.nodeValue, parent)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write("<![CDATA[")
stream.write(self.nodeValue)
stream.write("]]>")
def _genprefix():
i = 0
while True:
yield 'p' + str(i)
i = i + 1
genprefix = _genprefix().next
class _Attr(CharacterData):
"Support class for getAttributeNode."
class Element(Node):
preserveCase = 0
caseInsensitive = 1
nsprefixes = None
def __init__(self, tagName, attributes=None, parentNode=None,
filename=None, markpos=None,
caseInsensitive=1, preserveCase=0,
namespace=None):
Node.__init__(self, parentNode)
self.preserveCase = preserveCase or not caseInsensitive
self.caseInsensitive = caseInsensitive
if not preserveCase:
tagName = tagName.lower()
if attributes is None:
self.attributes = {}
else:
self.attributes = attributes
for k, v in self.attributes.items():
self.attributes[k] = unescape(v)
if caseInsensitive:
self.attributes = InsensitiveDict(self.attributes,
preserve=preserveCase)
self.endTagName = self.nodeName = self.tagName = tagName
self._filename = filename
self._markpos = markpos
self.namespace = namespace
def addPrefixes(self, pfxs):
if self.nsprefixes is None:
self.nsprefixes = pfxs
else:
self.nsprefixes.update(pfxs)
def endTag(self, endTagName):
if not self.preserveCase:
endTagName = endTagName.lower()
self.endTagName = endTagName
def isEqualToElement(self, n):
if self.caseInsensitive:
return ((self.attributes == n.attributes)
and (self.nodeName.lower() == n.nodeName.lower()))
return (self.attributes == n.attributes) and (self.nodeName == n.nodeName)
def isEqualToNode(self, other):
"""
Compare this element to C{other}. If the C{nodeName}, C{namespace},
C{attributes}, and C{childNodes} are all the same, return C{True},
otherwise return C{False}.
"""
return (
self.nodeName.lower() == other.nodeName.lower() and
self.namespace == other.namespace and
self.attributes == other.attributes and
Node.isEqualToNode(self, other))
def cloneNode(self, deep=0, parent=None):
clone = Element(
self.tagName, parentNode=parent, namespace=self.namespace,
preserveCase=self.preserveCase, caseInsensitive=self.caseInsensitive)
clone.attributes.update(self.attributes)
if deep:
clone.childNodes = [child.cloneNode(1, clone) for child in self.childNodes]
else:
clone.childNodes = []
return clone
def getElementsByTagName(self, name):
if self.caseInsensitive:
return getElementsByTagNameNoCase(self, name)
return getElementsByTagName(self, name)
def hasAttributes(self):
return 1
def getAttribute(self, name, default=None):
return self.attributes.get(name, default)
def getAttributeNS(self, ns, name, default=None):
nsk = (ns, name)
if nsk in self.attributes:
return self.attributes[nsk]
if ns == self.namespace:
return self.attributes.get(name, default)
return default
def getAttributeNode(self, name):
return _Attr(self.getAttribute(name), self)
def setAttribute(self, name, attr):
self.attributes[name] = attr
def removeAttribute(self, name):
if name in self.attributes:
del self.attributes[name]
def hasAttribute(self, name):
return name in self.attributes
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
"""
Serialize this L{Element} to the given stream.
@param stream: A file-like object to which this L{Element} will be
written.
@param nsprefixes: A C{dict} mapping namespace URIs as C{str} to
prefixes as C{str}. This defines the prefixes which are already in
scope in the document at the point at which this L{Element} exists.
This is essentially an implementation detail for namespace support.
Applications should not try to use it.
@param namespace: The namespace URI as a C{str} which is the default at
the point in the document at which this L{Element} exists. This is
essentially an implementation detail for namespace support.
Applications should not try to use it.
"""
# write beginning
ALLOWSINGLETON = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param',
'area', 'input', 'col', 'basefont', 'isindex',
'frame')
BLOCKELEMENTS = ('html', 'head', 'body', 'noscript', 'ins', 'del',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'script',
'ul', 'ol', 'dl', 'pre', 'hr', 'blockquote',
'address', 'p', 'div', 'fieldset', 'table', 'tr',
'form', 'object', 'fieldset', 'applet', 'map')
FORMATNICELY = ('tr', 'ul', 'ol', 'head')
# this should never be necessary unless people start
# changing .tagName on the fly(?)
if not self.preserveCase:
self.endTagName = self.tagName
w = stream.write
if self.nsprefixes:
newprefixes = self.nsprefixes.copy()
for ns in nsprefixes.keys():
if ns in newprefixes:
del newprefixes[ns]
else:
newprefixes = {}
begin = ['<']
if self.tagName in BLOCKELEMENTS:
begin = [newl, indent] + begin
bext = begin.extend
writeattr = lambda _atr, _val: bext((' ', _atr, '="', escape(_val), '"'))
# Make a local for tracking what end tag will be used. If namespace
# prefixes are involved, this will be changed to account for that
# before it's actually used.
endTagName = self.endTagName
if namespace != self.namespace and self.namespace is not None:
# If the current default namespace is not the namespace of this tag
# (and this tag has a namespace at all) then we'll write out
# something related to namespaces.
if self.namespace in nsprefixes:
# This tag's namespace already has a prefix bound to it. Use
# that prefix.
prefix = nsprefixes[self.namespace]
bext(prefix + ':' + self.tagName)
# Also make sure we use it for the end tag.
endTagName = prefix + ':' + self.endTagName
else:
# This tag's namespace has no prefix bound to it. Change the
# default namespace to this tag's namespace so we don't need
# prefixes. Alternatively, we could add a new prefix binding.
# I'm not sure why the code was written one way rather than the
# other. -exarkun
bext(self.tagName)
writeattr("xmlns", self.namespace)
# The default namespace just changed. Make sure any children
# know about this.
namespace = self.namespace
else:
# This tag has no namespace or its namespace is already the default
# namespace. Nothing extra to do here.
bext(self.tagName)
j = ''.join
for attr, val in sorted(self.attributes.items()):
if isinstance(attr, tuple):
ns, key = attr
if ns in nsprefixes:
prefix = nsprefixes[ns]
else:
prefix = genprefix()
newprefixes[ns] = prefix
assert val is not None
writeattr(prefix+':'+key,val)
else:
assert val is not None
writeattr(attr, val)
if newprefixes:
for ns, prefix in newprefixes.iteritems():
if prefix:
writeattr('xmlns:'+prefix, ns)
newprefixes.update(nsprefixes)
downprefixes = newprefixes
else:
downprefixes = nsprefixes
w(j(begin))
if self.childNodes:
w(">")
newindent = indent + addindent
for child in self.childNodes:
if self.tagName in BLOCKELEMENTS and \
self.tagName in FORMATNICELY:
w(j((newl, newindent)))
child.writexml(stream, newindent, addindent, newl, strip,
downprefixes, namespace)
if self.tagName in BLOCKELEMENTS:
w(j((newl, indent)))
w(j(('</', endTagName, '>')))
elif self.tagName.lower() not in ALLOWSINGLETON:
w(j(('></', endTagName, '>')))
else:
w(" />")
def __repr__(self):
rep = "Element(%s" % repr(self.nodeName)
if self.attributes:
rep += ", attributes=%r" % (self.attributes,)
if self._filename:
rep += ", filename=%r" % (self._filename,)
if self._markpos:
rep += ", markpos=%r" % (self._markpos,)
return rep + ')'
def __str__(self):
rep = "<" + self.nodeName
if self._filename or self._markpos:
rep += " ("
if self._filename:
rep += repr(self._filename)
if self._markpos:
rep += " line %s column %s" % self._markpos
if self._filename or self._markpos:
rep += ")"
for item in self.attributes.items():
rep += " %s=%r" % item
if self.hasChildNodes():
rep += " >...</%s>" % self.nodeName
else:
rep += " />"
return rep
def _unescapeDict(d):
dd = {}
for k, v in d.items():
dd[k] = unescape(v)
return dd
def _reverseDict(d):
dd = {}
for k, v in d.items():
dd[v]=k
return dd
class MicroDOMParser(XMLParser):
# <dash> glyph: a quick scan thru the DTD says BODY, AREA, LINK, IMG, HR,
# P, DT, DD, LI, INPUT, OPTION, THEAD, TFOOT, TBODY, COLGROUP, COL, TR, TH,
# TD, HEAD, BASE, META, HTML all have optional closing tags
soonClosers = 'area link br img hr input base meta'.split()
laterClosers = {'p': ['p', 'dt'],
'dt': ['dt','dd'],
'dd': ['dt', 'dd'],
'li': ['li'],
'tbody': ['thead', 'tfoot', 'tbody'],
'thead': ['thead', 'tfoot', 'tbody'],
'tfoot': ['thead', 'tfoot', 'tbody'],
'colgroup': ['colgroup'],
'col': ['col'],
'tr': ['tr'],
'td': ['td'],
'th': ['th'],
'head': ['body'],
'title': ['head', 'body'], # this looks wrong...
'option': ['option'],
}
def __init__(self, beExtremelyLenient=0, caseInsensitive=1, preserveCase=0,
soonClosers=soonClosers, laterClosers=laterClosers):
self.elementstack = []
d = {'xmlns': 'xmlns', '': None}
dr = _reverseDict(d)
self.nsstack = [(d,None,dr)]
self.documents = []
self._mddoctype = None
self.beExtremelyLenient = beExtremelyLenient
self.caseInsensitive = caseInsensitive
self.preserveCase = preserveCase or not caseInsensitive
self.soonClosers = soonClosers
self.laterClosers = laterClosers
# self.indentlevel = 0
def shouldPreserveSpace(self):
for edx in range(len(self.elementstack)):
el = self.elementstack[-edx]
if el.tagName == 'pre' or el.getAttribute("xml:space", '') == 'preserve':
return 1
return 0
def _getparent(self):
if self.elementstack:
return self.elementstack[-1]
else:
return None
COMMENT = re.compile(r"\s*/[/*]\s*")
def _fixScriptElement(self, el):
# this deals with case where there is comment or CDATA inside
# <script> tag and we want to do the right thing with it
if not self.beExtremelyLenient or not len(el.childNodes) == 1:
return
c = el.firstChild()
if isinstance(c, Text):
# deal with nasty people who do stuff like:
# <script> // <!--
# x = 1;
# // --></script>
# tidy does this, for example.
prefix = ""
oldvalue = c.value
match = self.COMMENT.match(oldvalue)
if match:
prefix = match.group()
oldvalue = oldvalue[len(prefix):]
# now see if contents are actual node and comment or CDATA
try:
e = parseString("<a>%s</a>" % oldvalue).childNodes[0]
except (ParseError, MismatchedTags):
return
if len(e.childNodes) != 1:
return
e = e.firstChild()
if isinstance(e, (CDATASection, Comment)):
el.childNodes = []
if prefix:
el.childNodes.append(Text(prefix))
el.childNodes.append(e)
def gotDoctype(self, doctype):
self._mddoctype = doctype
def gotTagStart(self, name, attributes):
# print ' '*self.indentlevel, 'start tag',name
# self.indentlevel += 1
parent = self._getparent()
if (self.beExtremelyLenient and isinstance(parent, Element)):
parentName = parent.tagName
myName = name
if self.caseInsensitive:
parentName = parentName.lower()
myName = myName.lower()
if myName in self.laterClosers.get(parentName, []):
self.gotTagEnd(parent.tagName)
parent = self._getparent()
attributes = _unescapeDict(attributes)
namespaces = self.nsstack[-1][0]
newspaces = {}
for k, v in attributes.items():
if k.startswith('xmlns'):
spacenames = k.split(':',1)
if len(spacenames) == 2:
newspaces[spacenames[1]] = v
else:
newspaces[''] = v
del attributes[k]
if newspaces:
namespaces = namespaces.copy()
namespaces.update(newspaces)
for k, v in attributes.items():
ksplit = k.split(':', 1)
if len(ksplit) == 2:
pfx, tv = ksplit
if pfx != 'xml' and pfx in namespaces:
attributes[namespaces[pfx], tv] = v
del attributes[k]
el = Element(name, attributes, parent,
self.filename, self.saveMark(),
caseInsensitive=self.caseInsensitive,
preserveCase=self.preserveCase,
namespace=namespaces.get(''))
revspaces = _reverseDict(newspaces)
el.addPrefixes(revspaces)
if newspaces:
rscopy = self.nsstack[-1][2].copy()
rscopy.update(revspaces)
self.nsstack.append((namespaces, el, rscopy))
self.elementstack.append(el)
if parent:
parent.appendChild(el)
if (self.beExtremelyLenient and el.tagName in self.soonClosers):
self.gotTagEnd(name)
def _gotStandalone(self, factory, data):
parent = self._getparent()
te = factory(data, parent)
if parent:
parent.appendChild(te)
elif self.beExtremelyLenient:
self.documents.append(te)
def gotText(self, data):
if data.strip() or self.shouldPreserveSpace():
self._gotStandalone(Text, data)
def gotComment(self, data):
self._gotStandalone(Comment, data)
def gotEntityReference(self, entityRef):
self._gotStandalone(EntityReference, entityRef)
def gotCData(self, cdata):
self._gotStandalone(CDATASection, cdata)
def gotTagEnd(self, name):
# print ' '*self.indentlevel, 'end tag',name
# self.indentlevel -= 1
if not self.elementstack:
if self.beExtremelyLenient:
return
raise MismatchedTags(*((self.filename, "NOTHING", name)
+self.saveMark()+(0,0)))
el = self.elementstack.pop()
pfxdix = self.nsstack[-1][2]
if self.nsstack[-1][1] is el:
nstuple = self.nsstack.pop()
else:
nstuple = None
if self.caseInsensitive:
tn = el.tagName.lower()
cname = name.lower()
else:
tn = el.tagName
cname = name
nsplit = name.split(':',1)
if len(nsplit) == 2:
pfx, newname = nsplit
ns = pfxdix.get(pfx,None)
if ns is not None:
if el.namespace != ns:
if not self.beExtremelyLenient:
raise MismatchedTags(*((self.filename, el.tagName, name)
+self.saveMark()+el._markpos))
if not (tn == cname):
if self.beExtremelyLenient:
if self.elementstack:
lastEl = self.elementstack[0]
for idx in range(len(self.elementstack)):
if self.elementstack[-(idx+1)].tagName == cname:
self.elementstack[-(idx+1)].endTag(name)
break
else:
# this was a garbage close tag; wait for a real one
self.elementstack.append(el)
if nstuple is not None:
self.nsstack.append(nstuple)
return
del self.elementstack[-(idx+1):]
if not self.elementstack:
self.documents.append(lastEl)
return
else:
raise MismatchedTags(*((self.filename, el.tagName, name)
+self.saveMark()+el._markpos))
el.endTag(name)
if not self.elementstack:
self.documents.append(el)
if self.beExtremelyLenient and el.tagName == "script":
self._fixScriptElement(el)
def connectionLost(self, reason):
XMLParser.connectionLost(self, reason) # This can cause more events!
if self.elementstack:
if self.beExtremelyLenient:
self.documents.append(self.elementstack[0])
else:
raise MismatchedTags(*((self.filename, self.elementstack[-1],
"END_OF_FILE")
+self.saveMark()
+self.elementstack[-1]._markpos))
def parse(readable, *args, **kwargs):
"""Parse HTML or XML readable."""
if not hasattr(readable, "read"):
readable = open(readable, "rb")
mdp = MicroDOMParser(*args, **kwargs)
mdp.filename = getattr(readable, "name", "<xmlfile />")
mdp.makeConnection(None)
if hasattr(readable,"getvalue"):
mdp.dataReceived(readable.getvalue())
else:
r = readable.read(1024)
while r:
mdp.dataReceived(r)
r = readable.read(1024)
mdp.connectionLost(None)
if not mdp.documents:
raise ParseError(mdp.filename, 0, 0, "No top-level Nodes in document")
if mdp.beExtremelyLenient:
if len(mdp.documents) == 1:
d = mdp.documents[0]
if not isinstance(d, Element):
el = Element("html")
el.appendChild(d)
d = el
else:
d = Element("html")
for child in mdp.documents:
d.appendChild(child)
else:
d = mdp.documents[0]
doc = Document(d)
doc.doctype = mdp._mddoctype
return doc
def parseString(st, *args, **kw):
if isinstance(st, UnicodeType):
# this isn't particularly ideal, but it does work.
return parse(StringIO(st.encode('UTF-16')), *args, **kw)
return parse(StringIO(st), *args, **kw)
def parseXML(readable):
"""Parse an XML readable object."""
return parse(readable, caseInsensitive=0, preserveCase=1)
def parseXMLString(st):
"""Parse an XML readable object."""
return parseString(st, caseInsensitive=0, preserveCase=1)
# Utility
class lmx:
"""Easy creation of XML."""
def __init__(self, node='div'):
if isinstance(node, StringTypes):
node = Element(node)
self.node = node
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError("no private attrs")
return lambda **kw: self.add(name,**kw)
def __setitem__(self, key, val):
self.node.setAttribute(key, val)
def __getitem__(self, key):
return self.node.getAttribute(key)
def text(self, txt, raw=0):
nn = Text(txt, raw=raw)
self.node.appendChild(nn)
return self
def add(self, tagName, **kw):
newNode = Element(tagName, caseInsensitive=0, preserveCase=0)
self.node.appendChild(newNode)
xf = lmx(newNode)
for k, v in kw.items():
if k[0] == '_':
k = k[1:]
xf[k]=v
return xf
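# Hedged usage sketch (not part of the original module): building a small tree
# with the lmx helper and serializing it with toxml().
def _exampleLmx():
    root = lmx(Element("ul"))
    root.li().text("first")
    root.li().text("second")
    return root.node.toxml()  # e.g. '<ul><li>first</li><li>second</li></ul>'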
|
|
import json
import datetime
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
from pycon.tutorials.models import PyConTutorialProposal
from pycon.tutorials.utils import process_tutorial_request
from symposion.schedule.forms import SlotEditForm
from symposion.schedule.models import Schedule, Day, Slot, Presentation
from symposion.schedule.timetable import TimeTable
def fetch_schedule(slug):
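    """Return the requested Schedule: when ``slug`` is None there must be exactly
    one Schedule (otherwise 404); with a slug, look it up by section slug
    (404 if missing)."""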
qs = Schedule.objects.all()
if slug is None:
if qs.count() > 1:
raise Http404()
schedule = next(iter(qs), None)
if schedule is None:
raise Http404()
else:
schedule = get_object_or_404(qs, section__slug=slug)
return schedule
def schedule_conference(request):
days = Day.objects.filter(schedule__published=True)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_conference.html", {
"timetables": timetables,
})
def schedule_detail(request, slug=None):
schedule = fetch_schedule(slug)
if not schedule.published and not request.user.is_staff:
raise Http404()
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_detail.html", {
"schedule": schedule,
"timetables": timetables,
})
def schedule_list(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True).order_by('title')
ctx = {
"schedule": schedule,
"presentations": presentations,
}
return render(request, "schedule/schedule_list.html", ctx)
def schedule_list_csv(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True).order_by("id")
response = HttpResponse(mimetype="text/csv")
if slug:
file_slug = slug
else:
file_slug = "presentations"
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % file_slug
response.write(loader.get_template("schedule/schedule_list.csv").render(Context({
"presentations": presentations,
})))
return response
@login_required
def schedule_edit(request, slug=None):
if not request.user.is_staff:
raise Http404()
schedule = fetch_schedule(slug)
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_edit.html", {
"schedule": schedule,
"timetables": timetables,
})
@login_required
def schedule_slot_edit(request, slug, slot_pk):
if not request.user.is_staff:
raise Http404()
slot = get_object_or_404(Slot, day__schedule__section__slug=slug, pk=slot_pk)
if request.method == "POST":
form = SlotEditForm(request.POST, slot=slot)
if form.is_valid():
save = False
if "content_override" in form.cleaned_data:
slot.content_override = form.cleaned_data["content_override"]
save = True
if "presentation" in form.cleaned_data:
presentation = form.cleaned_data["presentation"]
if presentation is None:
slot.unassign()
else:
slot.assign(presentation)
if save:
slot.save()
return redirect("schedule_edit", slug)
else:
form = SlotEditForm(slot=slot)
ctx = {
"slug": slug,
"form": form,
"slot": slot,
}
return render(request, "schedule/_slot_edit.html", ctx)
def schedule_presentation_detail(request, pk):
presentation = get_object_or_404(Presentation, pk=pk)
# Tutorials allow for communication between instructor/attendee(s).
# Offload the logic to its utility
if isinstance(presentation.proposal, PyConTutorialProposal) and \
request.method == 'POST':
return process_tutorial_request(request, presentation)
if presentation.slot:
schedule = presentation.slot.day.schedule
else:
schedule = None
ctx = {
"presentation": presentation,
"proposal": presentation.proposal,
"speakers": presentation.proposal.speakers,
"schedule": schedule,
}
return render(request, "schedule/presentation_detail.html", ctx)
def json_serializer(obj):
if isinstance(obj, datetime.time):
return obj.strftime("%H:%M")
raise TypeError
def schedule_json(request):
"""
Returns information about the schedule.
*No authentication required.*
URL: /<YEAR>/schedule/conference.json
The data returned is in JSON format, and looks like::
[ <slot>, <slot>, ..., <poster>, <poster> ...]
where a slot represents a talk, tutorial, or plenary and looks like::
{
"kind": "talk"|"tutorial"|"plenary",
"name": "Title of talk",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # ISO format
"end": "HH:MM:SS", # ISO format
"duration": 30, # minutes
"authors" ["author name 1", "author name 2", ..., "author name N"],
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"conf_key": 27,
"conf_url": "https://conference_domain/path/to/talk",
"video_url": "https://somehost/path/to/video_of_talk",
"slides_url": "https://somehost/path/to/slides_of_talk",
"assets_url": "https://somehost/path/to/assets_for_talk",
"tags": "tag1, tag2, ..., tagN",
"recording_release": true
}
and a poster looks like::
{
"kind": "poster",
"name": "Title of poster",
"authors" ["author name 1", "author name 2", ..., "author name N"],
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # Provided but meaningless, ignore...
"end": "HH:MM:SS", # Provided but meaningless, ignore...
"conf_key": 1227,
"conf_url": "https://conference_domain/path/to/page/about/talk"
}
"""
slots = Slot.objects.all().order_by("start")
data = []
for slot in slots:
if slot.kind.label in ["talk", "tutorial", "plenary"] and slot.content:
slot_data = {
"name": slot.content.title,
"room": ", ".join(room["name"] for room in slot.rooms.values()),
"start": slot.start_date.isoformat(),
"end": slot.end_date.isoformat(),
"duration": slot.duration,
"authors": [s.name for s in slot.content.speakers()],
"abstract": getattr(slot.content.abstract, 'raw', slot.content.abstract),
"description": getattr(slot.content.description, 'raw', slot.content.description),
"conf_key": slot.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[slot.content.pk])
),
"kind": slot.kind.label,
"video_url": slot.content.video_url,
"slides_url": slot.content.slides_url,
"assets_url": slot.content.assets_url,
"tags": "",
"recording_release": slot.content.proposal.recording_release if hasattr(slot.content.proposal, 'recording_release') else None,
}
else:
continue
data.append(slot_data)
for poster in Presentation.objects.filter(section__slug="posters", cancelled=False):
poster_data = {
"name": poster.title,
"authors": [s.name for s in poster.speakers()],
"description": getattr(poster.description, 'raw', poster.description),
"abstract": getattr(poster.abstract, 'raw', poster.abstract),
"room": "Poster Room",
"start": datetime.datetime(2014, 03, 17, 10).isoformat(),
"end": datetime.datetime(2014, 03, 17, 13, 10).isoformat(),
"conf_key": 1000 + poster.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[poster.pk])
),
"kind": "poster",
}
data.append(poster_data)
return HttpResponse(
json.dumps(data, default=json_serializer),
content_type="application/json"
)
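# Hedged usage sketch (not part of the original module): how a client might
# consume the JSON documented in schedule_json above. The filtering key names
# come from the docstring; fetching the URL itself is left out.
def _example_talk_titles(json_text):
    slots = json.loads(json_text)
    return [slot["name"] for slot in slots if slot["kind"] == "talk"]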
|
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2016, 2015, 2014, 2013, 2012 Pavel Kostelnik
# Copyright 2017, 2018 Michael Schwager
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File: DragNDropWidget.py
# the drag and drop widget library for Kivy.
# Version 0.4
from __future__ import print_function
# import copy
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.properties import (
ListProperty, NumericProperty, BooleanProperty, ObjectProperty, StringProperty)
from kivy.uix.widget import Widget
# from kivydnd import dnd_storage_singletons
from kivydnd.dnd_storage_singletons import draggables_dict, drag_destinations_dict
from kivydnd.debug_print import Debug, debug_widget_title
debug = Debug() # Is False by default.
DEBUG_TOUCH_UP = 0x00
DEBUG_TOUCH_MOVE = 0x00
DEBUG_DRAG_START = 0x00
DEBUG_COLLIDE_POINT = 0x00
DEBUG_DRAG_FINISH = 0x00
DEBUG_UNROOT_ME = 0x00
DEBUG_REBORN = 0x00
DEBUG_SUCCESSFUL_DROP = 0x00
DEBUG_POST_SUCCESSFUL_ANIM = 0x00
debug.register = DEBUG_TOUCH_UP | DEBUG_TOUCH_MOVE | DEBUG_DRAG_START | DEBUG_COLLIDE_POINT |\
DEBUG_DRAG_FINISH | DEBUG_UNROOT_ME | DEBUG_REBORN | DEBUG_SUCCESSFUL_DROP |\
DEBUG_POST_SUCCESSFUL_ANIM
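# Each DEBUG_* constant above is a bit flag; OR-ing them into debug.register is
# what (presumably) enables the matching debug.print(..., level=...) calls in the
# methods below. With every flag left at 0x00 the register is zero and all of
# those messages stay silent.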
# draggables_dict = dnd_storage_singletons.draggables_dict
# drag_destinations_dict = dnd_storage_singletons.drag_destinations_dict
class DragNDropWidget(Widget):
# let kivy take care of kwargs and get signals for free by using
# properties
droppable_zone_objects = ListProperty([])
bound_zone_objects = ListProperty([])
drag_opacity = NumericProperty(1.0)
drop_func = ObjectProperty(None)
drop_args = ListProperty([])
while_dragging_func = ObjectProperty(None) # The touch is given in Window coordinates
failed_drop_func = ObjectProperty(None)
failed_drop_args = ListProperty([])
remove_on_drag = BooleanProperty(True)
drop_ok_do_animation = BooleanProperty(True)
drop_ok_animation_time = NumericProperty(0.5)
not_drop_ok_do_animation = BooleanProperty(True)
not_drop_ok_animation_time = NumericProperty(0.2)
motion_over_widget_func = ObjectProperty(None)
motion_over_widget_args = ListProperty([])
motion_flee_widget_func = ObjectProperty(None)
motion_flee_widget_args = ListProperty([])
motion_outside_widget_func = ObjectProperty(None)
motion_outside_widget_args = ListProperty([])
drag_start_func = ObjectProperty(None)
drag_start_args = ListProperty([])
can_drop_into_parent = BooleanProperty(False)
drop_group = StringProperty("_palm_default")
rebirth_failed_drop = BooleanProperty(True)
close_on_fail = BooleanProperty(False)
# This is not a Property
widget_entered = None
def __init__(self, **kw):
super(DragNDropWidget, self).__init__(**kw)
self.register_event_type("on_drag_start")
self.register_event_type("on_being_dragged")
self.register_event_type("on_drag_finish")
self.register_event_type("on_motion_over")
self.register_event_type("on_motion_flee")
self.register_event_type("on_motion_outside")
self.register_event_type("on_close")
self._old_opacity = self.opacity
self._dragged = False
self._draggable = True
self.copy = False
self.touch_offset_x = 0
self.touch_offset_y = 0
self.drop_recipients = []
self.am_touched = False
self.double_tap_drag = False
self.is_double_tap = False
self.motion_is_bound_to_window = False
self.bind(motion_over_widget_func=self.bind_mouse_motion)
self.bind(motion_flee_widget_func=self.bind_mouse_motion)
self.bind(motion_outside_widget_func=self.bind_mouse_motion)
self.bind(drop_group=self.bind_drop_group)
self.found_drop_recipients_ok_dict = {}
self.min_x = -1
self.min_y = -1
self.max_x = -1
self.max_y = -1
self.move_counter = 0
self.touch_up_event_start = 0
self._up_event_count = 0
def close(self):
"""
You must call close() when you are removing the widget from the display.
:return:
"""
self.dispatch("on_close")
def on_close(self):
self.unbind(motion_over_widget_func=self.bind_mouse_motion)
self.unbind(motion_flee_widget_func=self.bind_mouse_motion)
self.unbind(motion_outside_widget_func=self.bind_mouse_motion)
self.unbind(drop_group=self.bind_drop_group)
self.unregister_event_types("on_drag_start")
self.unregister_event_types("on_being_dragged")
self.unregister_event_types("on_drag_finish")
self.unregister_event_types("on_motion_over")
self.unregister_event_types("on_motion_flee")
self.unregister_event_types("on_motion_outside")
self.unregister_event_types("on_close")
if self.motion_is_bound_to_window:
Window.unbind(mouse_pos=self.on_motion)
self.motion_is_bound_to_window = False
def bind_drop_group(self, arg1, arg2):
if self.drop_group not in draggables_dict:
draggables_dict[self.drop_group] = {}
draggables_dict[self.drop_group][self] = True
run_already = False
def bind_mouse_motion(self, the_widget, which_function):
if self.motion_is_bound_to_window is False:
Window.bind(mouse_pos=self.on_motion)
self.motion_is_bound_to_window = True
def set_draggable(self, value):
self._draggable = value
def set_remove_on_drag(self, value):
"""
This function sets the property that determines whether the dragged widget is just
copied from its parent or taken from its parent.
@param value: either True or False. If True then the widget will disappear from its
parent on drag, else the widget will just get copied for dragging
"""
self.remove_on_drag = value
def set_drag_start_state(self):
self._move_counter = 0
self._old_opacity = self.opacity
self.opacity = self.drag_opacity
self.set_bound_axis_positions()
self._old_drag_pos = self.pos
self._old_parent = self.parent
self._old_parent_children_reversed_list = self.parent.children[:]
self._old_parent_children_reversed_list.reverse()
self._dragged = True
DragNDropWidget.widget_entered = None
if self.copy:
self._old_index = -1
else:
self._old_index = self.parent.children.index(self)
def set_drag_finish_state(self, set_opacity=True):
# TODO: set_opacity is unused at present.
# TODO: Utilize it!
global DEBUG_DRAG_FINISH
self.is_double_tap = False
self._dragged = False
self.copy = False
self.move_counter = 0
self._up_event_count = 0
self.am_touched = False
# TODO: If I was the copy, I need to not be a copy :-). Set it to false...
# TODO: (after current debugging on 6/17/17)
if set_opacity:
self.opacity = self._old_opacity
debug.print(" ****************** DRAG N DROP TOTALLY DONE *********************", self, level=DEBUG_DRAG_FINISH)
def set_bound_axis_positions(self):
for obj in self.bound_zone_objects:
if self.min_x == -1:
self.max_x = obj.x + obj.size[0] - self.size[0]
self.max_y = obj.y + obj.size[1] - self.size[1]
self.min_x = obj.x
self.min_y = obj.y
if self.max_y < obj.y+obj.size[1]-self.size[1]:
self.max_y = obj.y+obj.size[1]-self.size[1]
if self.max_x < obj.x+obj.size[0]-self.size[0]:
self.max_x = obj.x + obj.size[0]-self.size[0]
if self.min_y > obj.y:
self.min_y = obj.y
if self.min_x > obj.x:
self.min_x = obj.x
def on_touch_down(self, touch):
"""
If we are a draggable object and the touch collides with us, we could be
embarking on a drag.
Kivy is knowledgeable about gestures and will send a touch down event immediately
if a touch up event quickly follows it, but it will delay an event if a touch
lingers.
TODO: Understand that mechanism.
on_touch_down compares the touch duration against a hard-coded numerical value
(currently 0.2 s). If the object is touched for longer than that, we may be
entering a drag operation. The value should be made configurable. ...TODO.
Note that if you hold down the touch, then after a short time the event will
be dispatched and touch.time_end will be -1.
touch.is_double_tap is maintained by Kivy.
self.is_double_tap is maintained by the widget, because we don't want on_touch_up to
set "am_touched" to be false too quickly..
:param touch:
:return:
"""
# TODO: make the drag delay configurable
# if self.text == "Me in relief.JPG":
# debug.print ("touch down Me in relief", definitely=True)
if self.collide_point(touch.x, touch.y) and self._draggable:
# detect if the touch is "long"... (if not, dispatch drag)
if (abs(touch.time_end - touch.time_start) > 0.2) or touch.is_double_tap:
self.touch_offset_x = touch.x - self.x
self.touch_offset_y = touch.y - self.y
self.am_touched = True
if touch.is_double_tap:
self.is_double_tap = True
def on_touch_up(self, mouse_motion_event):
"""
In a double tap, this gets called after each tap, which sets am_touched to be False.
So there's a bit of a complication, which is dealt with in the first if statement.
On regular touches, we're not going to do any dragging unless the touch had a
certain duration. If it's just a quick touch we should ignore it. This is the
'am_touched' variable.
:param mouse_motion_event: the touch event that is ending.
:return:
"""
#if self.collide_point(mouse_motion_event.x, mouse_motion_event.y) and self._draggable:
global DEBUG_TOUCH_UP
# debug.print ("*** DragNDropWidget", level=DEBUG_TOUCH_UP)
# debug.print ("*** self:", self, "copy:", self.copy, "parent:", self.parent, level=DEBUG_TOUCH_UP)
# debug.print ("*** id:", hex(id(self)), level=DEBUG_TOUCH_UP)
# debug.print ("*** am_touched:", self.am_touched, "is_double_tap:", self.is_double_tap, level=DEBUG_TOUCH_UP)
# debug.print ("*** was dragged?", self._dragged, "event:", mouse_motion_event.__repr__(), level=DEBUG_TOUCH_UP)
# debug.print ("*** on_touch_up ***", level=DEBUG_TOUCH_UP)
# debug.print ("*** Mouse Motion Event:", mouse_motion_event, level=DEBUG_TOUCH_UP)
# if self.text == "Me in relief.JPG":
# debug.print ("I hit Me in relief, double:", self.is_double_tap, definitely=True)
# If a widget is reborn, this event may be called a second time. Don't do that.
if self.touch_up_event_start == mouse_motion_event.time_start:
return
self.touch_up_event_start = mouse_motion_event.time_start
if not self.am_touched:
# Only respond to long touches.
debug.print(self, "NOT touched", level=DEBUG_TOUCH_UP)
return
else:
debug.print(self, "am touched", level=DEBUG_TOUCH_UP)
debug.print ("Am_touched:", self.am_touched, "double tap:", self.is_double_tap, level=DEBUG_TOUCH_UP)
self._up_event_count += 1
# Without this, double tap will never allow the widget to drag...
# Because self.am_touched will be set to false on the line following and
# on_touch_move will then do nothing
# TODO: Figure out why I'm setting drag_finish_state here.
# TODO: This seems odd, but I needed it at one point.
# TODO: I set it in
# if self.is_double_tap and not self._dragged:
# self.set_drag_finish_state() # _drag_started, is_double_tap, _dragged, copy all False
# return
#
if self._draggable and self._dragged:
debug.print ("on_touch_up: DRAGGED!!!!!!!", level=DEBUG_TOUCH_UP)
self.touch_x = mouse_motion_event.x
self.touch_y = mouse_motion_event.y
debug.print ('dispatch "on_drag_finish", mouse_motion_event) *******************************', level=DEBUG_TOUCH_UP)
# NOTE: If I don't do this, then I can click on a finished, fading widget.
self._dragged = False
# NOTE: ...that would cause an Attribute Error
self.dispatch("on_drag_finish", mouse_motion_event)
return
# TODO: Is this right? How do I send on_touch_up after
# TODO: a double tap?
else:
debug.print ("_draggable:", self._draggable, "_dragged:",
self._dragged, "is_double_tap:", self.is_double_tap, "up event count:",
self._up_event_count, level=DEBUG_TOUCH_UP)
# Here, the user double-tapped and just came up, or
# the user single tapped. Kivy sends the first on_touch_up event in the
# middle of a double-tap. This handles the case when the touch simply comes
# up.
            if (self.is_double_tap and self._up_event_count == 2) or not self.is_double_tap:
debug.print ("Reset _up_event_count.", level=DEBUG_TOUCH_UP)
debug.print ("is_double_tap", self.is_double_tap, "_up_event_count", self._up_event_count,
level=DEBUG_TOUCH_UP)
self.set_drag_finish_state()
# TODO: LOOK ALL OVER FOR DISPATCH, AND SEND COORDS
# TODO: Need to set Window.bind(mouse_pos=self.on_motion)
# TODO: The functions are Properties, so I can do this when they're set!!!
def on_touch_move(the_widget, mouse_motion_event):
"""
As per the Kivy docs (under Widget), mouse_motion_event
is in parent coordinates.
:param mouse_motion_event:
:return:
"""
global DEBUG_TOUCH_MOVE
if the_widget.am_touched:
debug.print("MOVING", the_widget.text, level=DEBUG_TOUCH_MOVE)
if not the_widget._dragged:
the_widget.dispatch("on_drag_start", mouse_motion_event)
else:
# debug.print("Not touched:", the_widget.text, level=DEBUG_TOUCH_MOVE)
return
if not the_widget._dragged:
return
the_widget._move_counter += 1
if the_widget._draggable and the_widget._dragged:
# if the_widget._dragged and the_widget._draggable:
x = mouse_motion_event.x - the_widget.touch_offset_x
y = mouse_motion_event.y - the_widget.touch_offset_y
# TODO: Correct this debug_flag temporary print.
debug.print ("widget pos:", x, y, "parent:", the_widget.parent,
"window:", Window.mouse_pos[0], Window.mouse_pos[1], level=DEBUG_TOUCH_MOVE)
if the_widget.min_x != -1:
if x <= the_widget.min_x:
x = the_widget.min_x
if x > the_widget.max_x:
x = the_widget.max_x
if y <= the_widget.min_y:
y = the_widget.min_y
if y > the_widget.max_y:
y = the_widget.max_y
the_widget.pos = (x, y)
# SPECIAL! Takes a herky-jerky GUI and makes it smoooooth....
the_widget.canvas.ask_update()
# Execute widget's while_dragging_func while dragging the widget
if the_widget.while_dragging_func is not None:
the_widget.while_dragging_func(the_widget, mouse_motion_event)
# Execute while_dragging_func for all drag destinations that are in the same
# drop group as the widget, that the widget passes over.
for drop_group in draggables_dict:
if draggables_dict[drop_group].get(the_widget):
if drag_destinations_dict.get(drop_group) is not None:
for drag_destination in drag_destinations_dict.get(drop_group):
if drag_destination.while_dragging_func is not None:
if drag_destination.absolute_collide_point(Window.mouse_pos[0], Window.mouse_pos[1]):
debug.print("Window mouse:", Window.mouse_pos[0], Window.mouse_pos[1],
"Touch pos to Window:",
the_widget.to_window(mouse_motion_event.x, mouse_motion_event.y),
level=DEBUG_TOUCH_MOVE)
drag_destination.while_dragging_func(the_widget, mouse_motion_event)
# DEPRECATED.................................................................
# No longer used. ...But what is the purpose of bind_functions? Pavel wrote
# it but I don't understand its purpose.
def easy_access_dnd(self, function_to_do_over, function_to_do_flee,
function_to_do_outside, arguments = None, bind_functions = None):
"""
        This function enables behavior that can be used instead of drag-n-drop.
        @param function_to_do_over: called when the mouse_over event is fired on the widget
        @param function_to_do_flee: called when the mouse_flee event is fired on the widget
        @param function_to_do_outside: called when the mouse moves outside the widget
        @param bind_functions: what is really to be done - background functions for GUI functionality
        """
if arguments is None:
arguments = []
if bind_functions is None:
bind_functions = []
Window.bind(mouse_pos=self.on_motion)
self.easy_access_dnd_function_over = function_to_do_over
self.easy_access_dnd_function_flee = function_to_do_flee
self.easy_access_dnd_function_outside = function_to_do_outside
self.easy_access_dnd_function_arguments = arguments
self.easy_access_dnd_function_binds = bind_functions
# ^^^ DEPRECATED..............................................................
def on_motion(self, top_level_window, motion_xy_tuple):
"""
        As the mouse moves in the window, do stuff:
        - If it hits this widget, and
        - If it had not marked this widget as entered,
        - If it had not marked ANY widget as entered,
        - we have moved over this widget; dispatch on_motion_over
        - mark this widget as entered
        else: (it hit this widget, but it had marked another widget as entered)
        (This means it left that widget without dispatching on_motion_flee)
        - dispatch on_motion_flee for the other widget
        - dispatch on_motion_over for this widget
        - mark this widget as entered.
        :param top_level_window: The top level kivy window
        :param motion_xy_tuple: the coordinates of the mouse in the Window's coord system
        :return:
"""
if self._dragged:
return
if self.collide_point(*self.to_widget(motion_xy_tuple[0], motion_xy_tuple[1])):
if DragNDropWidget.widget_entered is not self:
if DragNDropWidget.widget_entered is not None:
# widget_entered is set, but it's not us. That means we just jumped
# from another widget to this one. We should make sure we fled the
# old one properly.
DragNDropWidget.widget_entered.dispatch("on_motion_flee", motion_xy_tuple)
self.dispatch("on_motion_over", motion_xy_tuple)
DragNDropWidget.widget_entered = self
else:
if DragNDropWidget.widget_entered is not None:
if self is DragNDropWidget.widget_entered:
self.dispatch("on_motion_flee", motion_xy_tuple)
DragNDropWidget.widget_entered = None
else:
self.dispatch("on_motion_outside", motion_xy_tuple)
    def on_motion_flee(self, motion_xy_tuple):
"""
Called when your touch point leaves a draggable item.
:return:
"""
if self.motion_flee_widget_func is not None:
self.motion_flee_widget_func(self, self.motion_flee_widget_args)
# TODO: WAS... adding this binds. Not sure why.
# self.easy_access_dnd_function_binds)
else:
pass
# debug.print "FUNCTION MOTION FLEE NONE"
DragNDropWidget.widget_entered = None
def on_motion_over(self, motion_xy_tuple):
"""
Called when your touch point crosses into a draggable item.
:return:
"""
if self.motion_over_widget_func is not None:
self.motion_over_widget_func(self, self.motion_over_widget_args)
# self.easy_access_dnd_function_binds)
else:
pass
# debug.print "FUNCTION MOTION OVER NONE"
def on_motion_outside(self, motion_xy_tuple):
try:
if self.motion_outside_widget_func is not None:
self.motion_outside_widget_func(self, self.motion_outside_widget_args)
else:
pass
# debug.print "FUNCTION OUT NONE"
except AttributeError:
pass
def deepen_the_copy(self, copy_of_self):
copy_of_self.copy = True
copy_of_self.parent = self.parent
copy_of_self.droppable_zone_objects = self.droppable_zone_objects
copy_of_self.bound_zone_objects = self.bound_zone_objects
copy_of_self.drag_opacity = self.drag_opacity
copy_of_self.drop_func = self.drop_func
copy_of_self.drop_args = self.drop_args
copy_of_self.drag_start_func = self.drag_start_func
copy_of_self.drag_start_args = self.drag_start_args
copy_of_self.failed_drop_func = self.failed_drop_func
copy_of_self.failed_drop_args = self.failed_drop_args
copy_of_self.remove_on_drag = self.remove_on_drag
copy_of_self.drop_ok_do_animation = self.drop_ok_do_animation
copy_of_self.drop_ok_animation_time = self.drop_ok_animation_time
copy_of_self.not_drop_ok_do_animation = self.not_drop_ok_do_animation
copy_of_self.not_drop_ok_animation_time = self.not_drop_ok_animation_time
copy_of_self.touch_offset_x = self.touch_offset_x
copy_of_self.touch_offset_y = self.touch_offset_y
copy_of_self.drop_recipients = self.drop_recipients
copy_of_self.drop_group = self.drop_group
copy_of_self.am_touched = self.am_touched
copy_of_self._dragged = self._dragged
copy_of_self.is_double_tap = self.is_double_tap
copy_of_self._up_event_count = self._up_event_count
copy_of_self.can_drop_into_parent = self.can_drop_into_parent
copy_of_self.rebirth_failed_drop = self.rebirth_failed_drop
copy_of_self.close_on_fail = self.close_on_fail
def on_drag_start(self, mouse_motion_event):
"""
When a drag starts, the widget is removed from its parent and added to the root window.
        If self.remove_on_drag is False, a copy of the widget is made and that copy is
        added to the root window instead.
        :param mouse_motion_event: Sent to us by Kivy. Contains .x and .y of the event, among
        other fields, for example: is_double_tap(bool), is_touch(bool), is_triple_tap(bool),
        pos(tuple), time_start(float), etc.
:return:
"""
global DEBUG_DRAG_START
if self._dragged:
return
debug.print("STARTING DRAG. Remove?", self.remove_on_drag, level=DEBUG_DRAG_START)
debug.print("is_double_tap:", self.is_double_tap, level=DEBUG_DRAG_START)
debug.print("What about class", self, "drag_start_func?:", str(self.drag_start_func), level=DEBUG_DRAG_START)
debug.print("Event:", mouse_motion_event, level=DEBUG_DRAG_START)
if self.remove_on_drag:
self.set_drag_start_state()
debug.print("remove_on_drag, What about class", self, "drag_start_func?:", str(self.drag_start_func), level=DEBUG_DRAG_START)
if self.drag_start_func is not None:
self.drag_start_func(self.drag_start_args)
self.root_window = self.parent.get_root_window()
self.root_parent(self)
else:
#create copy of object to drag
debug.print("Create copy, kivydnd copy of: ", self.text, self, level=DEBUG_DRAG_START)
# copy_of_self = copy.deepcopy(self)
copy_of_self = self.kivydnd_copy()
# We'll handle those variables that are common to ALL d-n-d
# widgets. The widgets' classes can handle specifics
# (such as text, etc.)
self.deepen_the_copy(copy_of_self)
self.am_touched = False
self._up_event_count = 0
copy_of_self.set_drag_start_state()
if copy_of_self.drag_start_func is not None:
copy_of_self.drag_start_func(copy_of_self.drag_start_args, copy=copy_of_self)
copy_of_self.root_window = self.parent.get_root_window()
# the final child class MUST implement __deepcopy__
# IF self.remove_on_drag == False !!! In this case this is
# met in draggableArhellModelImage class
# TODO: MIKE: it used to be that copy_of_self was added to _old_parent
# self._old_parent.add_widget(copy_of_self, index=self._old_index)
copy_of_self.root_parent(copy_of_self)
copy_of_self.pos = self.pos
debug.print("kivydnd copy: ", copy_of_self.text, copy_of_self, level=DEBUG_DRAG_START)
def absolute_collide_point(self, event_x, event_y):
global DEBUG_COLLIDE_POINT
(my_x, my_y)=self.to_window(self.x, self.y)
# debug.print "absolute_collide_point:", self, "x,y,w,h:", my_x, my_y, self.right + my_x, my_y + self.top
if event_x != Window.mouse_pos[0] or event_y != Window.mouse_pos[1]:
debug.print ("absolute_collide_point:", self, "x,y,w,h:", my_x, my_y, self.right + my_x, my_y + self.top, level=DEBUG_COLLIDE_POINT)
return my_x <= event_x <= (self.width + my_x) and my_y <= event_y <= (my_y + self.height)
def on_drag_finish(self, mouse_motion_event):
global DEBUG_DRAG_FINISH
# Don't worry, opacity will be properly set in set_drag_finish_state()
# after the animation
debug.print ("================================================================", level=DEBUG_DRAG_FINISH)
debug.print ("beginning, parent:", self.parent, "copy?", self.copy, level=DEBUG_DRAG_FINISH)
debug.print ("self:", self, "is_double_tap?", self.is_double_tap, level=DEBUG_DRAG_FINISH)
debug.print ("Dragged?", self._dragged, "Draggable?", self._draggable, level=DEBUG_DRAG_FINISH)
debug.print ("================================================================", level=DEBUG_DRAG_FINISH)
self.opacity = 1.0
drag_destination_list = []
self.found_drop_recipients_ok_dict = {}
# del self.drop_recipients[:]
(touch_window_x, touch_window_y) = self.to_window(self.touch_x, self.touch_y)
# -------------------------------------------------------------------------
# --- assemble list of possible drag destinations
# These destinations are based on either drop groups, or simply because
# they've been added to droppable_zone_objects
# debug.print "on_drag_finish: DRAGGABLES_DICT:", draggables_dict
debug.print("draggables_dict:", draggables_dict, level=DEBUG_DRAG_FINISH)
for drop_group in draggables_dict:
if draggables_dict[drop_group].get(self):
if drop_group in drag_destinations_dict:
for drop_recipient in drag_destinations_dict[drop_group]:
if not drop_recipient in drag_destination_list:
drag_destination_list.append(drop_recipient)
debug.print("drag_destinations_dict:", drag_destinations_dict, level=DEBUG_DRAG_FINISH)
# ..>Debugging only
for drop_group in drag_destinations_dict:
for obj in drag_destinations_dict[drop_group]:
debug.print("Contents: Title", debug_widget_title(obj), "object", obj, level=DEBUG_DRAG_FINISH)
# ..<debugging
for drop_group in drag_destinations_dict:
if draggables_dict[drop_group].get(self):
for drop_recipient in drag_destinations_dict[drop_group]:
if not drop_recipient in drag_destination_list:
drag_destination_list.append(drop_recipient)
debug.print("droppable_zone_objects:", self.droppable_zone_objects, level=DEBUG_DRAG_FINISH)
for obj in self.droppable_zone_objects:
if not obj in drag_destination_list:
drag_destination_list.append(obj)
# for obj in drag_destination_list:
# debug.print ("Possible drop destination:", obj.text)
# --- end of assemble list
# -------------------------------------------------------------------------
# --- check which object(s) did receive this drop.
debug.print("drag_destination_list:", drag_destination_list, level=DEBUG_DRAG_FINISH)
for obj in drag_destination_list:
debug.print("Title:", debug_widget_title(self), level=DEBUG_DRAG_FINISH)
debug.print("Touch position:", self.touch_x, self.touch_y,
"in-Window position:", touch_window_x, touch_window_y,
"Window:", Window.mouse_pos[0], Window.mouse_pos[1],
level=DEBUG_DRAG_FINISH)
debug.print("Check if drop ok: touch:", touch_window_x, touch_window_y,
"Drag Destination Object:", obj, end=" ",
level=DEBUG_DRAG_FINISH)
debug.print("Position in Window:",
obj.to_window(obj.x, obj.y), "WxH:", obj.width, obj.height, end=" ",
level=DEBUG_DRAG_FINISH)
# TODO: IF object does not subclass DropDestination, it won't have this
# TODO: method defined!
if self.widget_absolute_collide_point(obj, touch_window_x, touch_window_y):
debug.print("COLLIDE: True", end=" ", level=DEBUG_DRAG_FINISH)
if hasattr(obj, 'is_drop_eligible'):
if obj.is_drop_eligible is False:
debug.print("ELIGIBLE?", obj.is_drop_eligible, obj, level=DEBUG_DRAG_FINISH)
self.found_drop_recipients_ok_dict[obj] = False
continue
#else:
# pass
if obj is self._old_parent and not self.can_drop_into_parent:
self.found_drop_recipients_ok_dict[obj] = False
debug.print("OK: False", level=DEBUG_DRAG_FINISH)
else:
self.found_drop_recipients_ok_dict[obj] = True
debug.print("OK: True", level=DEBUG_DRAG_FINISH)
else:
debug.print("COLLIDE: False", level=DEBUG_DRAG_FINISH)
pass
# --- end of check
# -------------------------------------------------------------------------
# - (Possibly) perform animations
# - if a drop recipient is found (could include the parent), and it's ok
# to drop there (parent may not be, so this could be false), then set
# - not_drop_ok_do_animation = False
# - got_one_successful_drop = True
# - drop_ok_do_animation = False (if dropped onto old parent)
# - Run self.drop_func or self.failed_drop_func
drop_ok_do_animation = self.drop_ok_do_animation
not_drop_ok_do_animation = self.not_drop_ok_do_animation
got_one_successful_drop = False
got_one_drop_not_parent = False
# -------------------------------------------------------------------------
for found_drop_recipient, dropped_ok in self.found_drop_recipients_ok_dict.items():
debug.print("Drop Recipient:", found_drop_recipient, dropped_ok, level=DEBUG_DRAG_FINISH)
if dropped_ok:
not_drop_ok_do_animation = False
got_one_successful_drop = True
if found_drop_recipient != self._old_parent:
# TODO: Animation runs when the widget is not added to the
# TODO: drop recipient. This is a problem, because the widget
# TODO: exists but is invisible!
# TODO: for app_relative_layout: If a copied widget is dragged,
# TODO: its original parent may be the Window (not a widget).
# TODO: Therefore, animation is running when we don't want it.
got_one_drop_not_parent = True
if not got_one_drop_not_parent:
drop_ok_do_animation = False
# -------------------------------------------------------------------------
# Perform after-drop functions
if got_one_successful_drop:
debug.print("I will call on_successful_drop", level=DEBUG_DRAG_FINISH)
if drop_ok_do_animation:
self.on_successful_drop(animation=True)
else:
self.on_successful_drop(animation=False)
# self.post_successful_animation(None, self)
return
else:
# TODO: Do we want to run the animation? MIKE check this... is it right
# TODO: to be here???
debug.print("I will call on_unsuccessful_drop", level=DEBUG_DRAG_FINISH)
if not_drop_ok_do_animation:
self.on_unsuccessful_drop(animation=True)
else:
self.on_unsuccessful_drop(animation=False)
# On a successful drop, the widget will end up with no parent whatsoever.
debug.print ("THE END. Drag finished, me:", self, "parent:", self.parent, level=DEBUG_DRAG_FINISH)
def widget_absolute_collide_point(self, widget, x, y):
(widget_x, widget_y) = widget.to_window(widget.x, widget.y)
return widget_x <= x <= (widget.width + widget_x) and widget_y <= y <= (widget_y + widget.height)
def un_root_me(self, widget="dumb", anim="dumb2"):
global DEBUG_UNROOT_ME
debug.print ("Unroot start, parent: ", self.parent, "Me:", self, level=DEBUG_UNROOT_ME)
self.get_root_window().remove_widget(self)
debug.print ("unroot done, parent: ", self.parent, "Me:", self, level=DEBUG_UNROOT_ME)
def on_being_dragged(self):
pass
def reborn(self, widget=None, anim=None):
global DEBUG_REBORN
# print ("REBORN!! ================================================")
debug.print ("self.reborn(), old parent:", self._old_parent, level=DEBUG_REBORN)
self.un_root_me()
# BUG: We don't just add the reborn child to the parent.
# Adding child in the first position (the highest index) fails due
# to a bug in Kivy. We remove all remaining children and then re-add
# the bunch (including the original child which was not dropped in a new
# area).
for childs in self._old_parent.children[:]:
self._old_parent.remove_widget(childs)
for childs in self._old_parent_children_reversed_list:
debug.print ("self.reborn(), add ", childs, "to", self._old_parent, level=DEBUG_REBORN)
self._old_parent.add_widget(childs)
return
#
# As of this moment, this code is unreachable- it's a placeholder.
# See https://github.com/kivy/kivy/issues/4497
self._old_parent.add_widget(self, index=self._old_index)
def root_parent(self, widget):
orig_size = widget.size
if not self.remove_on_drag:
self.root_window.add_widget(widget)
return
if widget.parent:
parent = widget.parent
parent.remove_widget(widget)
parent.get_root_window().add_widget(widget)
widget.size_hint = (None, None)
widget.size = orig_size
def animate_failed_drop(self, **kwargs):
#print ("ANIMATE............................................")
#print ("X", Window.mouse_pos[0], "Y", Window.mouse_pos[1], self.pos,
# "OLD:", self._old_drag_pos)
anim = Animation(pos=self._old_drag_pos,
duration=self.not_drop_ok_animation_time, t="in_quad")
anim.bind(on_complete=self.post_unsuccessful_animation)
anim.start(self)
def un_root_and_close(self, animation_object=None, same_as_self=None):
self.un_root_me()
self.close()
def on_unsuccessful_drop(self, animation=True, widget=None):
"""
Called at the end of an unsuccessful drop, after the widget's animation is finished.
:param animation:
:param widget:
:return:
"""
debug.print("called from dragndropwidget; animate?", animation)
if animation is True:
self.animate_failed_drop()
if self.failed_drop_func is not None:
self.failed_drop_func(self, *self.failed_drop_args)
# TODO: CHECK THIS MIKE
if animation is not True: # The animation will call this, so only call here if not animating
self.post_unsuccessful_animation() # Simply resets some flags; opacity will be set after the animation
# TODO: PERFORM THIS HERE? Moved from post_unsuccessful_animation
# self.set_drag_finish_state(False)
def post_unsuccessful_animation(self, animation=None, widget=None):
"""
A bit of a misnomer, this is called to clean up after any unsuccessful drop,
but if there's an animation, it will be at the end of the animation.
:param animation: the Animation object that called this, or nothing (not used)
:param widget: the widget that this is run from, or nothing (not used)
:return: nothing
"""
if self.remove_on_drag:
if self.rebirth_failed_drop: # True by default
self.reborn()
else:
if self.close_on_fail is True:
self.un_root_and_close()
return
else:
self.un_root_and_close()
return
self.set_drag_finish_state()
# TODO: If a drop_func is defined, which runs first?
# TODO: EACH _args for the funcs must have the calling widget!
def on_successful_drop(self, animation=True):
"""
        Called at the end of a successful drop. If animation is True, a fade-out
        animation is started and post_successful_animation() runs when it completes;
        otherwise post_successful_animation() is called directly.
        :param animation: True to run the end-of-drop fade animation, False to finish immediately
        :return: nothing
"""
global DEBUG_SUCCESSFUL_DROP
debug.print ("on_successful_drop: ================================================================", level=DEBUG_SUCCESSFUL_DROP)
debug.print ("on_successful_drop 1, Parent:", self.parent, "object: ", self, "copy?", self.copy, level=DEBUG_SUCCESSFUL_DROP)
debug.print ("object:", self, "added args:", *self.drop_args, level=DEBUG_SUCCESSFUL_DROP)
debug.print ("is_double_tap?", self.is_double_tap, level=DEBUG_SUCCESSFUL_DROP)
if animation is True:
anim = Animation(opacity=0, duration=self.drop_ok_animation_time, t="in_quad")
anim.bind(on_complete=self.post_successful_animation)
anim.start(self)
# traceback.debug.print_stack()
if self.drop_func is not None:
debug.print (hex(id(self)), "Calling drop_func...", level=DEBUG_SUCCESSFUL_DROP)
debug.print ("With args:", self, *self.drop_args, level=DEBUG_SUCCESSFUL_DROP)
self.drop_func(self, *self.drop_args)
for found_drop_recipient, dropped_ok in self.found_drop_recipients_ok_dict.items():
if dropped_ok:
if getattr(found_drop_recipient, "drop_func", None) is not None:
debug.print (hex(id(self)), "Calling recipient's drop_func", level=DEBUG_SUCCESSFUL_DROP)
found_drop_recipient.drop_func(self)
# self.set_drag_finish_state(False) # Opacity will be set after the animation.
if animation is not True:
self.post_successful_animation(None, self)
debug.print ("on_successful_drop: === end ========================================================", level=DEBUG_SUCCESSFUL_DROP)
def post_successful_animation(self, animation, widget):
"""
This is called to clean up after any successful drop's animation, but it's
a misnomer, as it is also called at the end of a successful
drop without an animation.
:param animation: The Animation object from kivy.
:param widget: Just the widget calling this, aka self.
:return:
"""
global DEBUG_POST_SUCCESSFUL_ANIM
debug.print ("post_successful_animation 1, Parent:", self.parent, "object: ", self, "copy?", self.copy, level=DEBUG_POST_SUCCESSFUL_ANIM)
self.un_root_me()
debug.print ("post_successful_animation 2, Parent:", self.parent, "object: ", self, "copy?", self.copy, level=DEBUG_POST_SUCCESSFUL_ANIM)
self.opacity = self._old_opacity
for found_drop_recipient, dropped_ok in self.found_drop_recipients_ok_dict.items():
if dropped_ok:
if getattr(found_drop_recipient, "post_drop_func", None) is not None:
found_drop_recipient.post_drop_func(self)
self.set_drag_finish_state()
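# A hedged usage sketch, not part of the original module: assuming the attributes copied
# in deepen_the_copy() above (droppable_zone_objects, drop_func, remove_on_drag, ...) are
# Kivy properties settable through the constructor, a draggable label and a drop target
# could be wired up roughly like this:
#
#     class DraggableLabel(DragNDropWidget, Label):
#         pass
#
#     def on_dropped(dragged_widget, *args):
#         print("dropped:", dragged_widget)
#
#     target = BoxLayout()
#     drag_me = DraggableLabel(text="drag me",
#                              droppable_zone_objects=[target],
#                              drop_func=on_dropped,
#                              remove_on_drag=False)  # drag a copy; the original stays put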
|
|
import unittest
import responses
import json as json
import consulalerting.plugins as plugins
import consulalerting.settings as settings
import consulalerting.utilities as utilities
import consulalerting.ConsulHealthStruct as ConsulHealthStruct
from mock import patch, MagicMock, Mock
from requests import HTTPError
ALL_REQUESTS_ALERTING_AVAILABLE_PLUGINS = [
"hipchat", "slack", "mailgun", "pagerduty", "influxdb", "cachet"]
ALL_REQUESTS_PLUGINS_ALERT_LIST = [{"Node": "consul",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Tags": ["hipchat", "slack", "mailgun", "pagerduty", "influxdb", "devops", "db",
"redis"],
"ServiceName": "redis",
"Notes": "",
"Status": "critical",
"ServiceID": "redis",
"Output": "Usage: check_redis.py [options]\n\ncheck_redis.py: error: Warning level required\n"}]
CONSUL_HEALTH_STRUCT_ALL_REQUESTS_PLUGINS_ALERT_LIST = [
ConsulHealthStruct.ConsulHealthStruct(**obj) for obj in ALL_REQUESTS_PLUGINS_ALERT_LIST]
CONSUL_SLACK = {"api_token": "testing123testing123",
"rooms": {"devops": ""}}
CONSUL_HIPCHAT = {"api_token": "testing123testing123",
"url": "https://api.hipchat.com/v1/",
"rooms": {"devops": 1}}
CONSUL_MAILGUN = {"api_token": "testing123testing123",
"mailgun_domain": "sandboxtesting123testing123.mailgun.org",
"from": "[email protected]",
"teams": {"devops": ["[email protected]", "[email protected]"]}
}
CONSUL_PAGERDUTY = {"teams": {"devops": ""}}
CONSUL_INFLUXDB = {"url":"http://localhost:8086/write", "series":"test", "databases":{"db":"mydb"}}
CONSUL_ELASTICSEARCHLOG = {"logpath": "/path/to/log"}
CONSUL_CACHET = {"api_token": "notreallyatoken",
"site_url": "http://status.company.com",
"notify_subscribers": False,
}
class PluginsTests(unittest.TestCase):
def setUp(self):
self.obj = CONSUL_HEALTH_STRUCT_ALL_REQUESTS_PLUGINS_ALERT_LIST[0]
self.message_template = "Service {name}: "\
"is in a {state} state on {node}. "\
"Output from check: {output}".format(name=self.obj.ServiceName,
state=self.obj.Status,
node=self.obj.Node,
output=self.obj.Output)
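        # With the fixture above, message_template renders roughly as:
        #   "Service redis: is in a critical state on consul. Output from check: Usage: check_redis.py [options]..."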
@responses.activate
def test_notifySlack(self):
responses.add(
responses.POST, "https://slack.com/api/chat.postMessage", json=True, status=200)
status_code = plugins.notify_slack(
self.message_template, ["devops"], CONSUL_SLACK)
self.assertEqual(200, status_code)
@responses.activate
def test_notifySlackHipchat(self):
responses.add(
responses.POST, "https://slack.com/api/chat.postMessage", json=True, status=200)
responses.add(
responses.POST, "https://api.hipchat.com/v1/", json=True, status=200)
status_code = plugins.notify_slack(
self.message_template, ["devops"], CONSUL_SLACK)
self.assertEqual(200, status_code)
status_code = 0
status_code = plugins.notify_hipchat(
self.obj, self.message_template, ["devops"], CONSUL_HIPCHAT)
self.assertEqual(200, status_code)
@responses.activate
def test_notifyPagerduty(self):
responses.add(
responses.POST, "https://events.pagerduty.com/generic/2010-04-15/create_event.json", json=True, status=200)
status_code = plugins.notify_pagerduty(
self.obj, self.message_template, ["devops"], CONSUL_PAGERDUTY)
self.assertEqual(200, status_code)
@responses.activate
def test_notifyMailgun(self):
responses.add(
responses.POST, "https://api.mailgun.net/v2/{domain}/messages".format(
domain=CONSUL_MAILGUN["mailgun_domain"]),
json=True, status=200)
status_code = plugins.notify_mailgun(
self.message_template, ["devops"], CONSUL_MAILGUN)
self.assertEqual(200, status_code)
@responses.activate
def test_notifyHipchat(self):
responses.add(
responses.POST, "https://api.hipchat.com/v1/", json=True, status=200)
status_code = plugins.notify_hipchat(
self.obj, self.message_template, ["devops"], CONSUL_HIPCHAT)
self.assertEqual(200, status_code)
@responses.activate
def test_notifyInfluxdb(self):
responses.add(
responses.POST, "http://localhost:8086/write", json=True, status=204)
status_code = plugins.notify_influxdb(
self.obj, self.message_template, ["db"], CONSUL_INFLUXDB)
self.assertEqual(204, status_code)
def test_Cachet_no_api_token(self):
"""
No POST due to missing api token
"""
        consul_cachet_sub_api_token = dict(CONSUL_CACHET)  # copy so the shared fixture is not mutated
consul_cachet_sub_api_token['api_token'] = None
status_code = plugins.notify_cache(self.obj, self.message_template, consul_cachet_sub_api_token)
self.assertEqual(None, status_code)
def test_Cachet_no_site_url(self):
"""
No POST due to missing site url
"""
        consul_cachet_sub_api_token = dict(CONSUL_CACHET)  # copy so the shared fixture is not mutated
consul_cachet_sub_api_token['site_url'] = None
status_code = plugins.notify_cache(self.obj, self.message_template, consul_cachet_sub_api_token)
self.assertEqual(None, status_code)
@responses.activate
def test_Cachet_get_post(self):
"""
Successfully GET components, identifies intersecting tag, and POSTs incident to Cachet
"""
get_data = {
'data': [
{
"id": 2,
"name": "Redis",
},
{
"id": 4,
"name": "mysql",
}
]
}
responses.add(responses.GET, "http://status.company.com/api/v1/components", json=get_data, status=200)
responses.add(responses.POST, "http://status.company.com/api/v1/incidents", json=True, status=200)
status_code = plugins.notify_cache(self.obj, self.message_template, CONSUL_CACHET)
self.assertEqual(200, status_code)
@responses.activate
def test_Cachet_get_no_data_post_skipped(self):
"""
Successfully GET response but no Component data, does not find tag intersection, does not POST incident
"""
get_data = {
'data': []
}
responses.add(responses.GET, "http://status.company.com/api/v1/components", json=get_data, status=200)
status_code = plugins.notify_cache(self.obj, self.message_template, CONSUL_CACHET)
self.assertEqual(None, status_code)
@responses.activate
def test_Cachet_get_no_intersecting_tag_post_skipped(self):
"""
Successfully GET components
However, because the retrieved components do not match any of the provided tags
A ValueError is encountered and we do not POST incident
"""
get_data = {
'data': [
{
"id": 2,
"name": "notRedis", # we change the name in this test to something we know does not match
},
{
"id": 4,
"name": "mysql",
}
]
}
responses.add(responses.GET, "http://status.company.com/api/v1/components", json=get_data, status=200)
status_code = plugins.notify_cache(self.obj, self.message_template, CONSUL_CACHET)
self.assertEqual(None, status_code)
@responses.activate
def test_Cachet_get_fail_post_skip(self):
"""
Unsuccessful GET request, skips incident POST as a result
"""
responses.add(responses.GET, "http://status.company.com/api/v1/components", status=400)
status_code = plugins.notify_cache(self.obj, self.message_template, CONSUL_CACHET)
self.assertEqual(None, status_code)
@responses.activate
def test_Cachet_get_post_fail(self):
"""
Successfully GET components, identifies intersecting tag, but POST fails
"""
get_data = {
'data': [
{
"id": 2,
"name": "Redis",
},
{
"id": 4,
"name": "mysql",
}
]
}
responses.add(responses.GET, "http://status.company.com/api/v1/components", json=get_data, status=200)
responses.add(responses.POST, "http://status.company.com/api/v1/incidents", status=400)
status_code = plugins.notify_cache(self.obj, self.message_template, CONSUL_CACHET)
self.assertEqual(None, status_code)
@responses.activate
def test_notifySlackFail(self):
responses.add(
responses.POST, "https://slack.com/api/chat.postMessage", json=True, status=400)
status_code = plugins.notify_slack(
self.message_template, ["devops"], CONSUL_SLACK)
self.assertNotEqual(200, status_code)
@responses.activate
def test_notifyPagerdutyFail(self):
responses.add(
responses.POST, "https://events.pagerduty.com/generic/2010-04-15/create_event.json", json=True, status=40)
status_code = plugins.notify_pagerduty(
self.obj, self.message_template, ["devops"], CONSUL_PAGERDUTY)
self.assertNotEqual(200, status_code)
@responses.activate
def test_notifyMailgunFail(self):
responses.add(
responses.POST, "https://api.mailgun.net/v2/{domain}/messages".format(
domain=CONSUL_MAILGUN["mailgun_domain"]),
json=True, status=400)
status_code = plugins.notify_mailgun(
self.message_template, ["devops"], CONSUL_MAILGUN)
self.assertNotEqual(200, status_code)
@responses.activate
def test_notifyHipchatFail(self):
responses.add(
responses.POST, "https://api.hipchat.com/v1/", json=True, status=400)
status_code = plugins.notify_hipchat(
self.obj, self.message_template, ["devops"], CONSUL_HIPCHAT)
self.assertNotEqual(200, status_code)
@responses.activate
def test_notifyInfluxdbFail(self):
responses.add(
responses.POST, "http://localhost:8086/write", json=True, status=400)
status_code = plugins.notify_influxdb(
self.obj, self.message_template, ["db"], CONSUL_INFLUXDB)
self.assertNotEqual(204, status_code)
def test_notifyElasticSearchLog(self):
open_mock = MagicMock()
with patch('__builtin__.open', open_mock):
open_mock.return_value = MagicMock(spec=file)
plugins.notify_elasticsearchlog(self.obj, self.message_template, CONSUL_ELASTICSEARCHLOG)
file_handle = open_mock.return_value.__enter__.return_value
file_handle.write.assert_called_with("\n")
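if __name__ == '__main__':
    # Hedged addition, not in the original file: conventional entry point so this test
    # module can be run directly with the standard unittest runner.
    unittest.main()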
|
|
import logging
from urllib.parse import urljoin
import requests
from moneybird.authentication import Authentication
VERSION = '0.1.3'
logger = logging.getLogger('moneybird')
class MoneyBird(object):
"""
Client for the MoneyBird API.
:param authentication: The authentication method to use.
"""
version = 'v2'
base_url = 'https://moneybird.com/api/'
def __init__(self, authentication: Authentication):
self.authentication = authentication
self.session = None
self.renew_session()
def get(self, resource_path: str, administration_id: int = None):
"""
Performs a GET request to the endpoint identified by the resource path.
Example:
>>> from moneybird import MoneyBird, TokenAuthentication
>>> moneybird = MoneyBird(TokenAuthentication('access_token'))
>>> moneybird.get('administrations')
[{'id': 123, 'name': 'Parkietje B.V.', 'language': 'nl', ...
>>> moneybird.get('contacts/synchronization', 123)
[{'id': '143273868766741508', 'version': 1450856630}, ...
:param resource_path: The resource path.
:param administration_id: The administration id (optional, depending on the resource path).
:return: The decoded JSON response for the request.
"""
response = self.session.get(
url=self._get_url(administration_id, resource_path),
)
return self._process_response(response)
def post(self, resource_path: str, data: dict, administration_id: int = None):
"""
Performs a POST request to the endpoint identified by the resource path. POST requests are usually used to add
new data.
Example:
>>> from moneybird import MoneyBird, TokenAuthentication
>>> moneybird = MoneyBird(TokenAuthentication('access_token'))
>>> data = {'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed'}
>>> moneybird.post('webhooks', data, 123)
{'id': '143274315994891267', 'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed', ...
:param resource_path: The resource path.
:param data: The data to send to the server.
:param administration_id: The administration id (optional, depending on the resource path).
:return: The decoded JSON response for the request.
"""
response = self.session.post(
url=self._get_url(administration_id, resource_path),
json=data,
)
return self._process_response(response)
def patch(self, resource_path: str, data: dict, administration_id: int = None):
"""
Performs a PATCH request to the endpoint identified by the resource path. PATCH requests are usually used to
change existing data.
From a client perspective, PATCH requests behave similarly to POST requests.
:param resource_path: The resource path.
:param data: The data to send to the server.
:param administration_id: The administration id (optional, depending on the resource path).
:return: The decoded JSON response for the request.
"""
response = self.session.patch(
url=self._get_url(administration_id, resource_path),
json=data,
)
return self._process_response(response)
def delete(self, resource_path: str, administration_id: int = None):
"""
Performs a DELETE request to the endpoint identified by the resource path. DELETE requests are usually used to
(permanently) delete existing data. USE THIS METHOD WITH CAUTION.
From a client perspective, DELETE requests behave similarly to GET requests.
:param resource_path: The resource path.
:param administration_id: The administration id (optional, depending on the resource path).
:return: The decoded JSON response for the request.
"""
response = self.session.delete(
url=self._get_url(administration_id, resource_path),
)
return self._process_response(response)
def renew_session(self):
"""
Clears all session data and starts a new session using the same settings as before.
This method can be used to clear session data, e.g., cookies. Future requests will use a new session initiated
with the same settings and authentication method.
"""
logger.debug("API session renewed")
self.session = self.authentication.get_session()
self.session.headers.update({
'User-Agent': 'MoneyBird for Python %s' % VERSION,
'Accept': 'application/json',
})
@classmethod
def _get_url(cls, administration_id: int, resource_path: str):
"""
Builds the URL to the API endpoint specified by the given parameters.
:param administration_id: The ID of the administration (may be None).
:param resource_path: The path to the resource.
:return: The absolute URL to the endpoint.
"""
url = urljoin(cls.base_url, '%s/' % cls.version)
if administration_id is not None:
url = urljoin(url, '%s/' % administration_id)
url = urljoin(url, '%s.json' % resource_path)
return url
@staticmethod
def _process_response(response: requests.Response, expected: list = []) -> dict:
"""
Processes an API response. Raises an exception when appropriate.
The exception that will be raised is MoneyBird.APIError. This exception is subclassed so implementing programs
can easily react appropriately to different exceptions.
The following subclasses of MoneyBird.APIError are likely to be raised:
- MoneyBird.Unauthorized: No access to the resource or invalid authentication
- MoneyBird.Throttled: Access (temporarily) denied, please try again
- MoneyBird.NotFound: Resource not found, check resource path
        - MoneyBird.InvalidData: Validation errors occurred while processing your input
- MoneyBird.ServerError: Error on the server
:param response: The response to process.
:param expected: A list of expected status codes which won't raise an exception.
:return: The useful data in the response (may be None).
"""
responses = {
200: None,
201: None,
204: None,
400: MoneyBird.Unauthorized,
401: MoneyBird.Unauthorized,
403: MoneyBird.Throttled,
404: MoneyBird.NotFound,
406: MoneyBird.NotFound,
422: MoneyBird.InvalidData,
429: MoneyBird.Throttled,
500: MoneyBird.ServerError,
}
logger.debug("API request: %s %s\n" % (response.request.method, response.request.url) +
"Response: %s %s" % (response.status_code, response.text))
if response.status_code not in expected:
if response.status_code not in responses:
logger.error("API response contained unknown status code")
raise MoneyBird.APIError(response, "API response contained unknown status code")
elif responses[response.status_code] is not None:
try:
description = response.json()['error']
except (AttributeError, TypeError, KeyError, ValueError):
description = None
raise responses[response.status_code](response, description)
try:
data = response.json()
except ValueError:
logger.error("API response is not JSON decodable")
data = None
return data
class APIError(Exception):
"""
Exception for cases where communication with the API went wrong.
This exception is specialized into a number of exceptions with the exact same properties.
"""
def __init__(self, response: requests.Response, description: str = None):
"""
:param response: The API response.
:param description: Description of the error.
"""
self._response = response
msg = 'API error %d' % response.status_code
if description:
msg += ': %s' % description
super(MoneyBird.APIError, self).__init__(msg)
@property
def status_code(self):
"""
HTTP status code of the request.
"""
return self._response.status_code
@property
def response(self):
"""
JSON encoded data of the response.
"""
return self._response.json()
@property
def request(self):
"""
Short string representation of the request (method and URL).
"""
return '%s %s' % (self._response.request.method, self._response.request.url)
class Unauthorized(APIError):
pass
class NotFound(APIError):
pass
class InvalidData(APIError):
pass
class Throttled(APIError):
pass
class ServerError(APIError):
pass
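# A minimal usage sketch, not part of the original module, following the doctest examples
# in MoneyBird.get(); the token value is a placeholder and the administration id is taken
# from the first administration the API returns.
def _example_usage(token='access_token'):
    from moneybird import TokenAuthentication  # deferred import, as in the doctest examples
    api = MoneyBird(TokenAuthentication(token))
    try:
        administrations = api.get('administrations')
        contacts = api.get('contacts', administrations[0]['id'])
    except MoneyBird.Unauthorized:
        # _process_response() raises this for 400/401 responses.
        contacts = None
    return contacts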
|
|
"""
Support for Denon Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.denon/
"""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_SELECT_SOURCE,
SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_STOP, SUPPORT_PLAY, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Music station'
SUPPORT_DENON = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
SUPPORT_MEDIA_MODES = SUPPORT_PAUSE | SUPPORT_STOP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
NORMAL_INPUTS = {'Cd': 'CD', 'Dvd': 'DVD', 'Blue ray': 'BD', 'TV': 'TV',
                 'Satellite / Cable': 'SAT/CBL', 'Game': 'GAME',
'Game2': 'GAME2', 'Video Aux': 'V.AUX', 'Dock': 'DOCK'}
MEDIA_MODES = {'Tuner': 'TUNER', 'Media server': 'SERVER',
'Ipod dock': 'IPOD', 'Net/USB': 'NET/USB',
               'Rhapsody': 'RHAPSODY', 'Napster': 'NAPSTER',
'Pandora': 'PANDORA', 'LastFM': 'LASTFM',
'Flickr': 'FLICKR', 'Favorites': 'FAVORITES',
'Internet Radio': 'IRADIO', 'USB/IPOD': 'USB/IPOD'}
# Sub-modes of 'NET/USB'
# {'USB': 'USB', 'iPod Direct': 'IPD', 'Internet Radio': 'IRP',
# 'Favorites': 'FVP'}
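# A hedged example of the matching configuration.yaml entry, based on PLATFORM_SCHEMA
# above (host is required, name is optional and defaults to 'Music station'); the IP
# address is a placeholder:
#
#   media_player:
#     - platform: denon
#       host: 192.168.1.50
#       name: Living room receiver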
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Denon platform."""
denon = DenonDevice(config.get(CONF_NAME), config.get(CONF_HOST))
if denon.update():
add_devices([denon])
return True
else:
return False
class DenonDevice(MediaPlayerDevice):
"""Representation of a Denon device."""
def __init__(self, name, host):
"""Initialize the Denon device."""
self._name = name
self._host = host
self._pwstate = 'PWSTANDBY'
self._volume = 0
# Initial value 60dB, changed if we get a MVMAX
self._volume_max = 60
self._source_list = NORMAL_INPUTS.copy()
self._source_list.update(MEDIA_MODES)
self._muted = False
self._mediasource = ''
self._mediainfo = ''
self._should_setup_sources = True
def _setup_sources(self, telnet):
# NSFRN - Network name
self._name = self.telnet_request(telnet, 'NSFRN ?')[len('NSFRN '):]
# SSFUN - Configured sources with names
self._source_list = {}
for line in self.telnet_request(telnet, 'SSFUN ?', all_lines=True):
source, configured_name = line[len('SSFUN'):].split(" ", 1)
self._source_list[configured_name] = source
# SSSOD - Deleted sources
for line in self.telnet_request(telnet, 'SSSOD ?', all_lines=True):
source, status = line[len('SSSOD'):].split(" ", 1)
if status == 'DEL':
for pretty_name, name in self._source_list.items():
if source == name:
del self._source_list[pretty_name]
break
@classmethod
def telnet_request(cls, telnet, command, all_lines=False):
"""Execute `command` and return the response."""
_LOGGER.debug('Sending: "%s"', command)
telnet.write(command.encode('ASCII') + b'\r')
lines = []
while True:
line = telnet.read_until(b'\r', timeout=0.2)
if not line:
break
lines.append(line.decode('ASCII').strip())
            _LOGGER.debug('Received: "%s"', line)
if all_lines:
return lines
return lines[0]
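    # Example exchange over the receiver's telnet control protocol, as parsed in update()
    # below (hedged illustration, values are typical rather than exact):
    #   telnet_request(telnet, 'PW?')                 -> 'PWON' or 'PWSTANDBY'
    #   telnet_request(telnet, 'MV?', all_lines=True) -> ['MV45', 'MVMAX 98']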
def telnet_command(self, command):
"""Establish a telnet connection and sends `command`."""
telnet = telnetlib.Telnet(self._host)
_LOGGER.debug('Sending: "%s"', command)
telnet.write(command.encode('ASCII') + b'\r')
telnet.read_very_eager() # skip response
telnet.close()
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host)
except OSError:
return False
if self._should_setup_sources:
self._setup_sources(telnet)
self._should_setup_sources = False
self._pwstate = self.telnet_request(telnet, 'PW?')
for line in self.telnet_request(telnet, 'MV?', all_lines=True):
if line.startswith('MVMAX '):
# only grab two digit max, don't care about any half digit
self._volume_max = int(line[len('MVMAX '):len('MVMAX XX')])
continue
if line.startswith('MV'):
self._volume = int(line[len('MV'):])
self._muted = (self.telnet_request(telnet, 'MU?') == 'MUON')
self._mediasource = self.telnet_request(telnet, 'SI?')[len('SI'):]
if self._mediasource in MEDIA_MODES.values():
self._mediainfo = ""
answer_codes = ["NSE0", "NSE1X", "NSE2X", "NSE3X", "NSE4", "NSE5",
"NSE6", "NSE7", "NSE8"]
for line in self.telnet_request(telnet, 'NSE', all_lines=True):
self._mediainfo += line[len(answer_codes.pop(0)):] + '\n'
else:
self._mediainfo = self.source
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == 'PWSTANDBY':
return STATE_OFF
if self._pwstate == 'PWON':
return STATE_ON
return STATE_UNKNOWN
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / self._volume_max
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source_list(self):
"""List of available input sources."""
return sorted(list(self._source_list.keys()))
@property
def media_title(self):
"""Current media info."""
return self._mediainfo
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mediasource in MEDIA_MODES.values():
return SUPPORT_DENON | SUPPORT_MEDIA_MODES
else:
return SUPPORT_DENON
@property
def source(self):
"""Return the current input source."""
for pretty_name, name in self._source_list.items():
if self._mediasource == name:
return pretty_name
def turn_off(self):
"""Turn off media player."""
self.telnet_command('PWSTANDBY')
def volume_up(self):
"""Volume up media player."""
self.telnet_command('MVUP')
def volume_down(self):
"""Volume down media player."""
self.telnet_command('MVDOWN')
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.telnet_command('MV' +
str(round(volume * self._volume_max)).zfill(2))
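        # For example, volume=0.5 with _volume_max=98 sends 'MV49'; volume=0.04 sends 'MV04'
        # (zero-padded by zfill).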
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.telnet_command('MU' + ('ON' if mute else 'OFF'))
def media_play(self):
"""Play media media player."""
self.telnet_command('NS9A')
def media_pause(self):
"""Pause media player."""
self.telnet_command('NS9B')
def media_stop(self):
"""Pause media player."""
self.telnet_command('NS9C')
def media_next_track(self):
"""Send the next track command."""
self.telnet_command('NS9D')
def media_previous_track(self):
"""Send the previous track command."""
self.telnet_command('NS9E')
def turn_on(self):
"""Turn the media player on."""
self.telnet_command('PWON')
def select_source(self, source):
"""Select input source."""
self.telnet_command('SI' + self._source_list.get(source))
|
|
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, range
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
from sympy.matrices import Matrix
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
orientation *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
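# Hedged illustration of the canonicalization performed by _process_limits (not from the
# original source); results are Tuple objects, shown here informally with x = Symbol('x'):
#   _process_limits(x)          -> ([(x,)], 1)
#   _process_limits((x, 0, 2))  -> ([(x, 0, 2)], 1)
#   _process_limits((x, 2))     -> ([(x, 2)], 1)
# When only the lower limit is given, e.g. (x, a, None), the branch above drops the None,
# yielding (x, a), and the returned orientation is flipped to -1, as the docstring describes.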
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = sympify(function)
if hasattr(function, 'func') and function.func is Equality:
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Only limits with lower and upper bounds are supported; the indefinite form
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the dummy variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def as_dummy(self):
"""
Replace instances of the given dummy variables with explicit dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an object.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
        If the object supports the "integral at" limit ``(x,)`` it
is not treated as a dummy, but the explicit form, ``(x, x)``
of length 2 does treat the variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in range(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return self.func(f, *limits)
def _eval_interval(self, x, a, b):
limits = [( i if i[0] != x else (x,a,b) ) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s,n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x,a
>>> Integral(a*x**2,x).subs(x,4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
from sympy.core.function import AppliedUndef, UndefinedFunction
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
#
# This constructor only differs from ExprWithLimits
# in the application of the orientation variable. Perhaps merge?
function = sympify(function)
if hasattr(function, 'func') and function.func is Equality:
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
" specify dummy variables for %s. If the integrand contains"
" more than one free symbol, an integration variable should"
" be supplied explicitly e.g., integrate(f(x, y), x)"
% function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not w.has(*self.variables))
return Mul(*out[True])*self.func(Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return Add(*[ self.func(i, *self.limits) for i in summand.args ])
elif summand.is_Matrix:
return Matrix._new(summand.rows, summand.cols,
[self.func(i, *self.limits) for i in summand._mat])
elif summand != self.function:
return self.func(summand, *self.limits)
return self
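# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Reproduces the behaviour documented in the docstrings above: as_dummy()
# prefixes bound variables with an underscore, substituting a free symbol of a
# Sum leaves the dummy alone, and substituting the integration variable of an
# antiderivative yields an "evaluate at" form. The helper name is illustrative
# and is never called by SymPy itself.
def _dummy_and_subs_examples():
    from sympy import Integral, Sum, oo
    from sympy.abc import x, y, n, s, a
    print(Integral(x, (x, x, y), (y, x, y)).as_dummy())  # Integral(_x, (_x, x, _y), (_y, x, y))
    print(Sum(1/n**s, (n, 1, oo)).subs(s, 2))            # Sum(n**(-2), (n, 1, oo))
    print(Integral(a*x**2, x).subs(x, 4))                # Integral(a*x**2, (x, 4))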
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for remote diagnostics of system VMs
"""
import urllib
from marvin.cloudstackAPI import (runDiagnostics, getDiagnosticsData)
from marvin.cloudstackTestCase import cloudstackTestCase
# Import Local Modules
from marvin.codes import FAILED
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine)
from marvin.lib.common import (get_domain,
get_zone,
get_test_template,
list_ssvms,
list_routers)
from marvin.lib.utils import (cleanup_resources)
from nose.plugins.attrib import attr
class TestRemoteDiagnostics(cloudstackTestCase):
"""
Test remote diagnostics with system VMs and VR as root admin
"""
@classmethod
def setUpClass(cls):
testClient = super(TestRemoteDiagnostics, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.hypervisor = testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
template = get_test_template(
cls.apiclient,
cls.zone.id,
cls.hypervisor
)
if template == FAILED:
cls.fail("get_test_template() failed to return template")
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls.vm_1 = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.cleanup = [
cls.account,
cls.service_offering
]
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestRemoteDiagnostics,
cls
).getClsTestClient().getApiClient()
# Clean up, terminate the created templates
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_01_ping_in_vr_success(self):
'''
Test Ping command execution in VR
'''
# Validate the following:
# 1. Ping command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_02_ping_in_vr_failure(self):
'''
        Test Ping command execution failure in VR
'''
# Validate the following:
# 1. Ping command is executed remotely on VR
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
'Check diagnostics command returns a non-zero exit code')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_03_ping_in_ssvm_success(self):
'''
Test Ping command execution in SSVM
'''
# Validate the following:
# 1. Ping command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_04_ping_in_ssvm_failure(self):
'''
        Test Ping command execution failure in SSVM
'''
# Validate the following:
# 1. Ping command is executed remotely on SSVM
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_05_ping_in_cpvm_success(self):
'''
Test Ping command execution in CPVM
'''
# Validate the following:
# 1. Ping command is executed remotely on CPVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_ssvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in CPVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_06_ping_in_cpvm_failure(self):
'''
        Test Ping command execution failure in CPVM
'''
# Validate the following:
# 1. Ping command is executed remotely on CPVM
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_ssvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
'Check diagnostics command returns a non-zero exit code'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_07_arping_in_vr(self):
'''
Test Arping command execution in VR
'''
# Validate the following:
# 1. Arping command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = router.gateway
cmd.type = 'arping'
cmd.params = "-I eth2"
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_08_arping_in_ssvm(self):
'''
Test Arping command execution in SSVM
'''
# Validate the following:
# 1. Arping command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = ssvm.gateway
cmd.type = 'arping'
cmd.params = '-I eth2'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_09_arping_in_cpvm(self):
'''
Test Arping command execution in CPVM
'''
# Validate the following:
# 1. Arping command is executed remotely on CPVM
list_cpvm_response = list_ssvms(
self.apiclient,
            systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = cpvm.gateway
cmd.type = 'arping'
cmd.params = '-I eth2'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in CPVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_10_traceroute_in_vr(self):
'''
        Test Traceroute command execution in VR
'''
# Validate the following:
        # 1. Traceroute command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = "-m 10"
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
            'Failed to run remote Traceroute in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_11_traceroute_in_ssvm(self):
'''
Test Traceroute command execution in SSVM
'''
# Validate the following:
# 1. Traceroute command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = '-m 10'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Traceroute in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_12_traceroute_in_cpvm(self):
'''
        Test Traceroute command execution in CPVM
'''
# Validate the following:
# 1. Traceroute command is executed remotely on CPVM
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
        self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = '-m 10'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Traceroute in CPVM'
)
'''
Add Get Diagnostics data BVT
'''
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_13_retrieve_vr_default_files(self):
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Setting up VR with ID %s' % router.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = router.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
    def check_url(self, url):
        import urllib2
        try:
            response = urllib2.urlopen(url)
            return response.getcode() == 200
        except urllib2.HTTPError:
            return False
        except urllib2.URLError:
            return False
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_14_retrieve_vr_one_file(self):
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Setting up VR with ID %s' % router.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = router.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_15_retrieve_ssvm_default_files(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = ssvm.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_16_retrieve_ssvm_single_file(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = ssvm.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_17_retrieve_cpvm_default_files(self):
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = cpvm.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_18_retrieve_cpvm_single_file(self):
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = cpvm.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
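# --- Hedged refactoring sketch (not part of the original suite) ---
# The positive ping/arping/traceroute tests above all follow the same shape:
# build a runDiagnosticsCmd, point it at a system VM, and assert a zero exit
# code. A module-level helper such as this one could remove the duplication.
# The helper name and signature are illustrative, not Marvin APIs.
def run_diagnostics_and_assert(testcase, target_id, ip_address, diag_type, params=None):
    cmd = runDiagnostics.runDiagnosticsCmd()
    cmd.targetid = target_id
    cmd.ipaddress = ip_address
    cmd.type = diag_type
    if params:
        cmd.params = params
    response = testcase.apiclient.runDiagnostics(cmd)
    testcase.assertEqual(
        '0',
        response.exitcode,
        'Failed to run remote %s on target %s' % (diag_type, target_id))
    return response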
|
|
'''
This module contains high-level functions and classes useful for solving a wide variety of
economic models.
'''
from HARKutilities import getArgNames, NullFunc
from copy import deepcopy
import numpy as np
#PNG addition 2016-06-30
from __main__ import settings
def distanceMetric(thing_A,thing_B):
'''
A "universal distance" metric that can be used as a default in many settings.
Parameters
----------
thing_A : object
A generic object.
thing_B : object
Another generic object.
    Returns
    -------
distance : float
The "distance" between thing_A and thing_B.
'''
# Get the types of the two inputs
typeA = type(thing_A)
typeB = type(thing_B)
if typeA is list and typeB is list:
lenA = len(thing_A) # If both inputs are lists, then the distance between
lenB = len(thing_B) # them is the maximum distance between corresponding
if lenA == lenB: # elements in the lists. If they differ in length,
distance_temp = [] # the distance is the difference in lengths.
for n in range(lenA):
distance_temp.append(distanceMetric(thing_A[n],thing_B[n]))
distance = max(distance_temp)
else:
distance = float(abs(lenA - lenB))
# If both inputs are numbers, return their difference
    elif (typeA is int or typeA is float) and (typeB is int or typeB is float):
distance = float(abs(thing_A - thing_B))
# If both inputs are array-like, return the maximum absolute difference b/w
# corresponding elements (if same shape); return largest difference in dimensions
# if shapes do not align.
elif hasattr(thing_A,'shape') and hasattr(thing_B,'shape'):
if thing_A.shape == thing_B.shape:
distance = np.max(abs(thing_A - thing_B))
else:
distance = np.max(abs(thing_A.shape - thing_B.shape))
# If none of the above cases, but the objects are of the same class, call
# the distance method of one on the other
    elif thing_A.__class__.__name__ == thing_B.__class__.__name__:
distance = thing_A.distance(thing_B)
else: # Failsafe: the inputs are very far apart
distance = 1000.0
return distance
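# --- Hedged illustration (not part of the original module) ---
# Concrete calls showing how distanceMetric resolves the branches above:
# equal-length lists, mismatched lengths, plain numbers, and same-shape arrays.
# The helper name is illustrative and is never called by HARK itself.
def _distance_metric_examples():
    assert distanceMetric([1.0, 2.0], [1.5, 2.0]) == 0.5    # max elementwise gap
    assert distanceMetric([1.0, 2.0, 3.0], [1.0]) == 2.0    # difference in lengths
    assert distanceMetric(3, 4.5) == 1.5                    # numbers: absolute difference
    assert distanceMetric(np.zeros(3), np.ones(3)) == 1.0   # arrays of the same shape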
class HARKobject():
'''
A superclass for object classes in HARK. Comes with two useful methods:
a generic/universal distance method and an attribute assignment method.
'''
def distance(self,other):
'''
A generic distance method, which requires the existence of an attribute
called distance_criteria, giving a list of strings naming the attributes
to be considered by the distance metric.
Parameters
----------
other : object
Another object to compare this instance to.
Returns
-------
(unnamed) : float
The distance between this object and another, using the "universal
distance" metric.
'''
distance_list = [0.0]
for attr_name in self.distance_criteria:
            obj_A = getattr(self, attr_name)
            obj_B = getattr(other, attr_name)
distance_list.append(distanceMetric(obj_A,obj_B))
return max(distance_list)
def assignParameters(self,**kwds):
'''
Assign an arbitrary number of attributes to this agent.
Parameters
----------
**kwds : keyword arguments
Any number of keyword arguments of the form key=value. Each value
            will be assigned to an attribute of the same name in self.
Returns
-------
none
'''
for key in kwds:
setattr(self,key,kwds[key])
def __call__(self,**kwds):
'''
Assign an arbitrary number of attributes to this agent, as a convenience.
See assignParameters.
'''
self.assignParameters(**kwds)
class Solution(HARKobject):
'''
A superclass for representing the "solution" to a single period problem in a
dynamic microeconomic model.
NOTE: This can be deprecated now that HARKobject exists, but this requires
replacing each instance of Solution with HARKobject in the other modules.
'''
class AgentType(HARKobject):
'''
A superclass for economic agents in the HARK framework. Each model should
specify its own subclass of AgentType, inheriting its methods and overwriting
as necessary. Critically, every subclass of AgentType should define class-
specific static values of the attributes time_vary and time_inv as lists of
strings. Each element of time_vary is the name of a field in AgentSubType
that varies over time in the model. Each element of time_inv is the name of
a field in AgentSubType that is constant over time in the model. The string
'solveOnePeriod' should appear in exactly one of these lists, depending on
whether the same solution method is used in all periods of the model.
'''
def __init__(self,solution_terminal=NullFunc,cycles=1,time_flow=False,pseudo_terminal=True,
tolerance=0.000001,seed=0,**kwds):
'''
Initialize an instance of AgentType by setting attributes.
Parameters
----------
solution_terminal : Solution
A representation of the solution to the terminal period problem of
this AgentType instance, or an initial guess of the solution if this
is an infinite horizon problem.
cycles : int
The number of times the sequence of periods is experienced by this
AgentType in their "lifetime". cycles=1 corresponds to a lifecycle
model, with a certain sequence of one period problems experienced
once before terminating. cycles=0 corresponds to an infinite horizon
model, with a sequence of one period problems repeating indefinitely.
time_flow : boolean
Whether time is currently "flowing" forward or backward for this
instance. Used to flip between solving (using backward iteration)
and simulating (etc).
pseudo_terminal : boolean
Indicates whether solution_terminal isn't actually part of the
solution to the problem (as a known solution to the terminal period
problem), but instead represents a "scrap value"-style termination.
When True, solution_terminal is not included in the solution; when
false, solution_terminal is the last element of the solution.
tolerance : float
Maximum acceptable "distance" between successive solutions to the
one period problem in an infinite horizon (cycles=0) model in order
for the solution to be considered as having "converged". Inoperative
when cycles>0.
seed : int
A seed for this instance's random number generator.
Returns
-------
None
'''
self.solution_terminal = solution_terminal
self.cycles = cycles
self.time_flow = time_flow
self.pseudo_terminal = pseudo_terminal
self.solveOnePeriod = NullFunc
self.tolerance = tolerance
self.seed = seed
self.assignParameters(**kwds)
self.resetRNG()
def timeReport(self):
'''
Report to the user the direction that time is currently "flowing" for
this instance. Only exists as a reminder of how time_flow works.
Parameters
----------
none
Returns
-------
none
'''
if self.time_flow:
print('Time varying objects are listed in ordinary chronological order.')
else:
print('Time varying objects are listed in reverse chronological order.')
def timeFlip(self):
'''
Reverse the flow of time for this instance.
Parameters
----------
none
Returns
-------
none
'''
for name in self.time_vary:
            getattr(self, name).reverse()
self.time_flow = not self.time_flow
def timeFwd(self):
'''
Make time flow forward for this instance.
Parameters
----------
none
Returns
-------
none
'''
if not self.time_flow:
self.timeFlip()
def timeRev(self):
'''
Make time flow backward for this instance.
Parameters
----------
none
Returns
-------
none
'''
if self.time_flow:
self.timeFlip()
def solve(self):
'''
Solve the model for this instance of an agent type by backward induction.
Loops through the sequence of one period problems, passing the solution
to period t+1 to the problem for period t.
Parameters
----------
none
Returns
-------
none
'''
self.preSolve() # Do pre-solution stuff
self.solution = solveAgent(self) # Solve the model by backward induction
if self.time_flow: # Put the solution in chronological order if this instance's time flow runs that way
self.solution.reverse()
if not ('solution' in self.time_vary):
self.time_vary.append('solution') # Add solution to the list of time-varying attributes
self.postSolve() # Do post-solution stuff
def resetRNG(self):
'''
Reset the random number generator for this type.
Parameters
----------
none
Returns
-------
none
'''
self.RNG = np.random.RandomState(self.seed)
def isSameThing(self,solutionA,solutionB):
'''
Compare two solutions to see if they are the "same." The model-specific
solution class must have a method called distance, which takes another
solution object as an input and returns the "distance" between the solutions.
This method is used to test for convergence in infinite horizon problems.
Parameters
----------
solutionA : Solution
The solution to a one period problem in the model.
solutionB : Solution
Another solution to (the same) one period problem in the model.
Returns
-------
(unnamed) : boolean
True if the solutions are within a tolerable distance of each other.
'''
solution_distance = solutionA.distance(solutionB)
return(solution_distance <= self.tolerance)
def preSolve(self):
'''
A method that is run immediately before the model is solved, to prepare
the terminal solution, perhaps. Does nothing here.
Parameters
----------
none
Returns
-------
none
'''
return None
def postSolve(self):
'''
A method that is run immediately after the model is solved, to finalize
the solution in some way. Does nothing here.
Parameters
----------
none
Returns
-------
none
'''
return None
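# --- Hedged usage sketch (not part of the original module) ---
# A bare-bones AgentType subclass illustrating the conventions described in
# the class docstring: time_vary/time_inv name the per-period and constant
# attributes, and solveOnePeriod points at the single-period solver. The
# attribute and solver names below are illustrative stand-ins, not HARK APIs.
def _toy_one_period_solver(solution_next, DiscFac, CRRA):
    '''Stand-in solver that simply relays the next period's solution.'''
    return solution_next

class _ToyAgentType(AgentType):
    time_vary = ['DiscFac']                 # varies over the agent's cycle
    time_inv = ['CRRA', 'solveOnePeriod']   # constant; same solver every period

    def __init__(self, **kwds):
        AgentType.__init__(self, cycles=1, **kwds)
        self.DiscFac = [0.96]               # one period per cycle
        self.CRRA = 2.0
        self.solveOnePeriod = _toy_one_period_solver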
def solveAgent(agent):
'''
Solve the dynamic model for one agent type. This function iterates on "cycles"
of an agent's model either a given number of times or until solution convergence
if an infinite horizon model is used (with agent.cycles = 0).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
Returns
-------
solution : [Solution]
A list of solutions to the one period problems that the agent will
encounter in his "lifetime". Returns in reverse chronological order.
'''
# Record the flow of time when the Agent began the process, and make sure time is flowing backwards
original_time_flow = agent.time_flow
agent.timeRev()
# Check to see whether this is an (in)finite horizon problem
cycles_left = agent.cycles
infinite_horizon = cycles_left == 0
# Initialize the solution, which includes the terminal solution if it's not a pseudo-terminal period
solution = []
if not agent.pseudo_terminal:
solution.append(deepcopy(agent.solution_terminal))
# Initialize the process, then loop over cycles
solution_last = agent.solution_terminal
go = True
completed_cycles = 0
max_cycles = 5000 # escape clause
while go:
# Solve a cycle of the model, recording it if horizon is finite
solution_cycle = solveOneCycle(agent,solution_last)
if not infinite_horizon:
solution += solution_cycle
# Check for termination: identical solutions across cycle iterations or run out of cycles
solution_now = solution_cycle[-1]
if infinite_horizon:
if completed_cycles > 0:
go = (not agent.isSameThing(solution_now,solution_last)) and \
(completed_cycles < max_cycles)
else: # Assume solution does not converge after only one cycle
go = True
else:
cycles_left += -1
go = cycles_left > 0
# Update the "last period solution"
solution_last = solution_now
completed_cycles += 1
# Record the last cycle if horizon is infinite (solution is still empty!)
if infinite_horizon:
solution = solution_cycle # PseudoTerminal=False impossible for infinite horizon
# Restore the direction of time to its original orientation, then return the solution
if original_time_flow:
agent.timeFwd()
return solution
def solveOneCycle(agent,solution_last):
'''
Solve one "cycle" of the dynamic model for one agent type. This function
iterates over the periods within an agent's cycle, updating the time-varying
parameters and passing them to the single period solver(s).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
solution_last : Solution
A representation of the solution of the period that comes after the
end of the sequence of one period problems. This might be the term-
inal period solution, a "pseudo terminal" solution, or simply the
solution to the earliest period from the succeeding cycle.
Returns
-------
solution_cycle : [Solution]
A list of one period solutions for one "cycle" of the AgentType's
microeconomic model. Returns in reverse chronological order.
'''
# Calculate number of periods per cycle, defaults to 1 if all variables are time invariant
if len(agent.time_vary) > 0:
name = agent.time_vary[0]
        T = len(getattr(agent, name))
else:
T = 1
# Check whether the same solution method is used in all periods
always_same_solver = 'solveOnePeriod' not in agent.time_vary
if always_same_solver:
solveOnePeriod = agent.solveOnePeriod
these_args = getArgNames(solveOnePeriod)
    # Construct a dictionary to be passed to the solver
    solve_dict = {name: getattr(agent, name) for name in agent.time_inv}
    solve_dict.update({name: None for name in agent.time_vary})
# Initialize the solution for this cycle, then iterate on periods
solution_cycle = []
solution_next = solution_last
for t in range(T):
# Update which single period solver to use (if it depends on time)
if not always_same_solver:
solveOnePeriod = agent.solveOnePeriod[t]
these_args = getArgNames(solveOnePeriod)
# Update time-varying single period inputs
for name in agent.time_vary:
if name in these_args:
                solve_dict[name] = getattr(agent, name)[t]
solve_dict['solution_next'] = solution_next
# Make a temporary dictionary for this period
temp_dict = {name: solve_dict[name] for name in these_args}
#PNG addition 2016-06-30
settings.t_curr = t
#temp_dict['t_curr'] = t
# Solve one period, add it to the solution, and move to the next period
solution_t = solveOnePeriod(**temp_dict)
solution_cycle.append(solution_t)
solution_next = solution_t
# Return the list of per-period solutions
return solution_cycle
#========================================================================
#========================================================================
class Market(HARKobject):
'''
A class to represent a central clearinghouse of information. Used for
dynamic general equilibrium models to solve the "macroeconomic" model as a
layer on top of the "microeconomic" models of one or more AgentTypes.
'''
def __init__(self,agents=[],sow_vars=[],reap_vars=[],const_vars=[],track_vars=[],dyn_vars=[],
millRule=None,calcDynamics=None,act_T=1000,tolerance=0.000001):
'''
Make a new instance of the Market class.
Parameters
----------
agents : [AgentType]
A list of all the AgentTypes in this market.
sow_vars : [string]
Names of variables generated by the "aggregate market process" that should
be "sown" to the agents in the market. Aggregate state, etc.
reap_vars : [string]
Names of variables to be collected ("reaped") from agents in the market
to be used in the "aggregate market process".
const_vars : [string]
Names of attributes of the Market instance that are used in the "aggregate
market process" but do not come from agents-- they are constant or simply
parameters inherent to the process.
track_vars : [string]
Names of variables generated by the "aggregate market process" that should
be tracked as a "history" so that a new dynamic rule can be calculated.
This is often a subset of sow_vars.
dyn_vars : [string]
Names of variables that constitute a "dynamic rule".
millRule : function
A function that takes inputs named in reap_vars and returns an object
with attributes named in sow_vars. The "aggregate market process" that
transforms individual agent actions/states/data into aggregate data to
be sent back to agents.
calcDynamics : function
A function that takes inputs named in track_vars and returns an object
with attributes named in dyn_vars. Looks at histories of aggregate
variables and generates a new "dynamic rule" for agents to believe and
act on.
act_T : int
The number of times that the "aggregate market process" should be run
in order to generate a history of aggregate variables.
tolerance: float
Minimum acceptable distance between "dynamic rules" to consider the
Market solution process converged. Distance is a user-defined metric.
Returns
-------
None
'''
self.agents = agents
self.reap_vars = reap_vars
self.sow_vars = sow_vars
self.const_vars = const_vars
self.track_vars = track_vars
self.dyn_vars = dyn_vars
if millRule is not None: # To prevent overwriting of method-based millRules
self.millRule = millRule
if calcDynamics is not None: # Ditto for calcDynamics
self.calcDynamics = calcDynamics
self.act_T = act_T
self.tolerance = tolerance
def solve(self):
'''
"Solves" the market by finding a "dynamic rule" that governs the aggregate
market state such that when agents believe in these dynamics, their actions
collectively generate the same dynamic rule.
Parameters
----------
none
Returns
-------
none
'''
go = True
max_loops = 1000 # Failsafe against infinite solution loop
completed_loops = 0
old_dynamics = None
while go: # Loop until the dynamic process converges or we hit the loop cap
for this_type in self.agents:
this_type.solve() # Solve each AgentType's micro problem
self.makeHistory() # "Run" the model while tracking aggregate variables
new_dynamics = self.updateDynamics() # Find a new aggregate dynamic rule
# Check to see if the dynamic rule has converged (if this is not the first loop)
if completed_loops > 0:
distance = new_dynamics.distance(old_dynamics)
else:
distance = 1000000.0
# Move to the next loop if the terminal conditions are not met
old_dynamics = new_dynamics
completed_loops += 1
go = distance >= self.tolerance and completed_loops < max_loops
self.dynamics = new_dynamics # Store the final dynamic rule in self
def reap(self):
'''
Collects attributes named in reap_vars from each AgentType in the market,
storing them in respectively named attributes of self.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.reap_vars:
harvest = []
for this_type in self.agents:
harvest.append(getattr(this_type,var_name))
setattr(self,var_name,harvest)
def sow(self):
'''
        Distributes attributes named in sow_vars from self to each AgentType
in the market, storing them in respectively named attributes.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.sow_vars:
this_seed = getattr(self,var_name)
for this_type in self.agents:
setattr(this_type,var_name,this_seed)
def mill(self):
'''
Processes the variables collected from agents using the function millRule,
        storing the results in attributes named in sow_vars.
Parameters
----------
none
Returns
-------
none
'''
# Make a dictionary of inputs for the millRule
        mill_dict = {name: getattr(self, name) for name in self.reap_vars}
        mill_dict.update({name: getattr(self, name) for name in self.const_vars})
# Run the millRule and store its output in self
product = self.millRule(**mill_dict)
for j in range(len(self.sow_vars)):
this_var = self.sow_vars[j]
this_product = getattr(product,this_var)
setattr(self,this_var,this_product)
def cultivate(self):
'''
Has each AgentType in agents perform their marketAction method, using
variables sown from the market (and maybe also "private" variables).
The marketAction method should store new results in attributes named in
reap_vars to be reaped later.
Parameters
----------
none
Returns
-------
none
'''
for this_type in self.agents:
this_type.marketAction()
def reset(self):
'''
Reset the state of the market (attributes in sow_vars, etc) to some
user-defined initial state, and erase the histories of tracked variables.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.track_vars: # Reset the history of tracked variables
setattr(self,var_name + '_hist',[])
for var_name in self.sow_vars: # Set the sow variables to their initial levels
initial_val = getattr(self,var_name + '_init')
setattr(self,var_name,initial_val)
for this_type in self.agents: # Reset each AgentType in the market
this_type.reset()
def store(self):
'''
Record the current value of each variable X named in track_vars in an
attribute named X_hist.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.track_vars:
value_now = getattr(self,var_name)
getattr(self,var_name + '_hist').append(value_now)
def makeHistory(self):
'''
Runs a loop of sow-->cultivate-->reap-->mill act_T times, tracking the
evolution of variables X named in track_vars in attributes named X_hist.
Parameters
----------
none
Returns
-------
none
'''
self.reset() # Initialize the state of the market
for t in range(self.act_T):
self.sow() # Distribute aggregated information/state to agents
self.cultivate() # Agents take action
self.reap() # Collect individual data from agents
self.mill() # Process individual data into aggregate data
self.store() # Record variables of interest
def updateDynamics(self):
'''
Calculates a new "aggregate dynamic rule" using the history of variables
named in track_vars, and distributes this rule to AgentTypes in agents.
Parameters
----------
none
Returns
-------
dynamics : instance
The new "aggregate dynamic rule" that agents believe in and act on.
Should have attributes named in dyn_vars.
'''
# Make a dictionary of inputs for the dynamics calculator
        update_dict = {name: getattr(self, name + '_hist') for name in self.track_vars}
# Calculate a new dynamic rule and distribute it to the agents in agent_list
dynamics = self.calcDynamics(**update_dict) # User-defined dynamics calculator
for var_name in self.dyn_vars:
this_obj = getattr(dynamics,var_name)
for this_type in self.agents:
setattr(this_type,var_name,this_obj)
return dynamics
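# --- Hedged usage sketch (not part of the original module) ---
# Wires up a Market with a trivial aggregate process so the
# sow --> cultivate --> reap --> mill --> store loop and the convergence test
# in solve() can be seen end to end. Every name defined here (_ToyDynamics,
# _toy_mill_rule, _toy_calc_dynamics, _toy_market_example) is an illustrative
# stand-in; a real model supplies its own AgentTypes, millRule, and calcDynamics.
def _toy_market_example():
    class _ToyDynamics(HARKobject):
        distance_criteria = ['price_rule']
        def __init__(self, price_rule):
            self.price_rule = price_rule

    def _toy_mill_rule(demand):
        aggregates = HARKobject()
        aggregates.price = 1.0             # the "aggregate market process"
        return aggregates

    def _toy_calc_dynamics(price):
        return _ToyDynamics(price_rule=np.mean(price))

    market = Market(agents=[],             # no micro agents in this toy example
                    sow_vars=['price'],
                    reap_vars=['demand'],
                    track_vars=['price'],
                    dyn_vars=['price_rule'],
                    millRule=_toy_mill_rule,
                    calcDynamics=_toy_calc_dynamics,
                    act_T=10,
                    tolerance=0.000001)
    market.price_init = 1.0                # reset() looks for <sow_var>_init
    market.solve()                         # loops until the dynamic rule converges
    return market.dynamics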
|
|
import collections
import copy
import hashlib
import os
import pathlib
import shutil
import subprocess
import tempfile
import yaml
from supriya import (
BinaryOperator,
CalculationRate,
ParameterRate,
UnaryOperator,
sclang,
)
from supriya.system import SupriyaObject
from .bases import BinaryOpUGen, UGen, UnaryOpUGen, WidthFirstUGen
from .compilers import SynthDefCompiler
from .controls import AudioControl, Control, LagControl, Parameter, TrigControl
from .grapher import SynthDefGrapher
from .mixins import OutputProxy, UGenMethodMixin
class SynthDef:
"""
A synth definition.
::
>>> import supriya.synthdefs
>>> import supriya.ugens
>>> with supriya.synthdefs.SynthDefBuilder(frequency=440) as builder:
... sin_osc = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
... out = supriya.ugens.Out.ar(bus=0, source=sin_osc)
...
>>> synthdef = builder.build()
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> import supriya.realtime
>>> server = supriya.Server().boot()
::
>>> synthdef.allocate(server=server)
<SynthDef: 9c4eb4778dc0faf39459fa8a5cd45c19>
::
>>> synthdef in server
True
::
>>> synthdef.free(server)
::
>>> synthdef in server
False
::
>>> server.quit()
<Server: offline>
"""
### CLASS VARIABLES ###
__slots__ = (
"_compiled_ugen_graph",
"_constants",
"_control_ugens",
"_indexed_parameters",
"_name",
"_ugens",
)
### INITIALIZER ###
def __init__(self, ugens, name=None, optimize=True, parameter_names=None, **kwargs):
self._name = name
ugens = list(copy.deepcopy(ugens))
assert all(isinstance(_, UGen) for _ in ugens)
ugens = self._cleanup_pv_chains(ugens)
ugens = self._cleanup_local_bufs(ugens)
if optimize:
ugens = self._optimize_ugen_graph(ugens)
ugens = self._sort_ugens_topologically(ugens)
self._ugens = tuple(ugens)
self._constants = self._collect_constants(self._ugens)
self._control_ugens = self._collect_control_ugens(self._ugens)
self._indexed_parameters = self._collect_indexed_parameters(
self._control_ugens, parameter_names=parameter_names
)
self._compiled_ugen_graph = SynthDefCompiler.compile_ugen_graph(self)
### SPECIAL METHODS ###
def __eq__(self, expr):
if type(expr) != type(self):
return False
if expr.name != self.name:
return False
if expr._compiled_ugen_graph != self._compiled_ugen_graph:
return False
return True
def __graph__(self):
r"""
Graphs SynthDef.
::
>>> with supriya.synthdefs.SynthDefBuilder(frequency=440) as builder:
... sin_osc = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
... out = supriya.ugens.Out.ar(bus=0, source=sin_osc)
...
>>> synthdef = builder.build()
>>> print(format(synthdef.__graph__(), "graphviz"))
digraph synthdef_... {
graph [bgcolor=transparent,
color=lightslategrey,
dpi=72,
fontname=Arial,
outputorder=edgesfirst,
overlap=prism,
penwidth=2,
rankdir=LR,
ranksep=1,
splines=spline,
style="dotted, rounded"];
node [fontname=Arial,
fontsize=12,
penwidth=2,
shape=Mrecord,
style="filled, rounded"];
edge [penwidth=2];
ugen_0 [fillcolor=lightgoldenrod2,
label="<f_0> Control\n(control) | { { <f_1_0_0> frequency:\n440.0 } }"];
ugen_1 [fillcolor=lightsteelblue2,
label="<f_0> SinOsc\n(audio) | { { <f_1_0_0> frequency | <f_1_0_1> phase:\n0.0 } | { <f_1_1_0> 0 } }"];
ugen_2 [fillcolor=lightsteelblue2,
label="<f_0> Out\n(audio) | { { <f_1_0_0> bus:\n0.0 | <f_1_0_1> source } }"];
ugen_0:f_1_0_0:e -> ugen_1:f_1_0_0:w [color=goldenrod];
ugen_1:f_1_1_0:e -> ugen_2:f_1_0_1:w [color=steelblue];
}
Returns Graphviz graph.
"""
return SynthDefGrapher.graph(self)
def __hash__(self):
hash_values = (type(self), self._name, self._compiled_ugen_graph)
return hash(hash_values)
def __repr__(self):
return "<{}: {}>".format(type(self).__name__, self.actual_name)
def __str__(self):
"""
Gets string representation of synth definition.
::
>>> import supriya.synthdefs
>>> import supriya.ugens
::
>>> with supriya.synthdefs.SynthDefBuilder() as builder:
... sin_one = supriya.ugens.SinOsc.ar()
... sin_two = supriya.ugens.SinOsc.ar(frequency=443)
... source = sin_one + sin_two
... out = supriya.ugens.Out.ar(bus=0, source=source)
...
>>> synthdef = builder.build(name="test")
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> print(synthdef)
synthdef:
name: test
ugens:
- SinOsc.ar/0:
frequency: 440.0
phase: 0.0
- SinOsc.ar/1:
frequency: 443.0
phase: 0.0
- BinaryOpUGen(ADDITION).ar:
left: SinOsc.ar/0[0]
right: SinOsc.ar/1[0]
- Out.ar:
bus: 0.0
source[0]: BinaryOpUGen(ADDITION).ar[0]
Returns string.
"""
def get_ugen_names():
grouped_ugens = {}
named_ugens = {}
for ugen in self._ugens:
key = (type(ugen), ugen.calculation_rate, ugen.special_index)
grouped_ugens.setdefault(key, []).append(ugen)
for ugen in self._ugens:
parts = [type(ugen).__name__]
if isinstance(ugen, BinaryOpUGen):
ugen_op = BinaryOperator.from_expr(ugen.special_index)
parts.append("(" + ugen_op.name + ")")
elif isinstance(ugen, UnaryOpUGen):
ugen_op = UnaryOperator.from_expr(ugen.special_index)
parts.append("(" + ugen_op.name + ")")
parts.append("." + ugen.calculation_rate.token)
key = (type(ugen), ugen.calculation_rate, ugen.special_index)
related_ugens = grouped_ugens[key]
if len(related_ugens) > 1:
parts.append("/{}".format(related_ugens.index(ugen)))
named_ugens[ugen] = "".join(parts)
return named_ugens
def get_parameter_name(input_, output_index=0):
if isinstance(input_, Parameter):
return ":{}".format(input_.name)
elif isinstance(input_, Control):
# Handle array-like parameters
value_index = 0
for parameter in input_.parameters:
values = parameter.value
if isinstance(values, float):
values = [values]
for i in range(len(values)):
if value_index != output_index:
value_index += 1
continue
elif len(values) == 1:
return ":{}".format(parameter.name)
else:
return ":{}[{}]".format(parameter.name, i)
return ""
ugens = []
named_ugens = get_ugen_names()
for ugen in self._ugens:
ugen_dict = {}
ugen_name = named_ugens[ugen]
for i, input_ in enumerate(ugen.inputs):
if i < len(ugen._ordered_input_names):
argument_name = tuple(ugen._ordered_input_names)[i]
else:
argument_name = tuple(ugen._ordered_input_names)[-1]
if (
ugen._unexpanded_input_names
and argument_name in ugen._unexpanded_input_names
):
unexpanded_index = i - tuple(ugen._ordered_input_names).index(
argument_name
)
argument_name += "[{}]".format(unexpanded_index)
if isinstance(input_, float):
value = input_
else:
output_index = 0
if isinstance(input_, OutputProxy):
output_index = input_.output_index
input_ = input_.source
input_name = named_ugens[input_]
value = "{}[{}{}]".format(
input_name,
output_index,
get_parameter_name(input_, output_index),
)
ugen_dict[argument_name] = value
if not ugen_dict:
ugen_dict = None
ugens.append({ugen_name: ugen_dict})
result = {
"synthdef": {
"name": self.actual_name,
# 'hash': self.anonymous_name,
"ugens": ugens,
}
}
return yaml.dump(result, default_flow_style=False, indent=4).rstrip()
### PRIVATE METHODS ###
@staticmethod
def _allocate_synthdefs(synthdefs, server):
# TODO: Should sync be configurable here?
import supriya.commands
d_recv_synthdef_groups = []
d_recv_synth_group = []
current_total = 0
d_load_synthdefs = []
if not synthdefs:
return
for synthdef in synthdefs:
# synthdef._register_with_local_server(server=server)
compiled = synthdef.compile()
if 8192 < len(compiled):
d_load_synthdefs.append(synthdef)
elif current_total + len(compiled) < 8192:
d_recv_synth_group.append(synthdef)
current_total += len(compiled)
else:
d_recv_synthdef_groups.append(d_recv_synth_group)
d_recv_synth_group = [synthdef]
current_total = len(compiled)
if d_recv_synth_group:
d_recv_synthdef_groups.append(d_recv_synth_group)
for d_recv_synth_group in d_recv_synthdef_groups:
d_recv_request = supriya.commands.SynthDefReceiveRequest(
synthdefs=tuple(d_recv_synth_group)
)
d_recv_request.communicate(server=server, sync=True)
if d_load_synthdefs:
temp_directory_path = tempfile.mkdtemp()
for synthdef in d_load_synthdefs:
file_name = "{}.scsyndef".format(synthdef.actual_name)
file_path = os.path.join(temp_directory_path, file_name)
with open(file_path, "wb") as file_pointer:
file_pointer.write(synthdef.compile())
d_load_dir_request = supriya.commands.SynthDefLoadDirectoryRequest(
directory_path=temp_directory_path
)
d_load_dir_request.communicate(server=server, sync=True)
shutil.rmtree(temp_directory_path)
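    # --- Hedged illustration (not part of the original class) ---
    # _allocate_synthdefs above batches compiled synthdefs into d_recv groups
    # whose combined size stays below 8192 bytes, and falls back to a d_load
    # directory request for any synthdef that is itself larger than that.
    # The helper below reproduces just the grouping logic on plain byte
    # strings; its name is illustrative and not part of supriya's public API.
    @staticmethod
    def _group_compiled_synthdefs(compiled_blobs, limit=8192):
        oversized, groups, current_group, current_total = [], [], [], 0
        for blob in compiled_blobs:
            if limit < len(blob):
                oversized.append(blob)          # too big for a single receive request
            elif current_total + len(blob) < limit:
                current_group.append(blob)      # fits in the open group
                current_total += len(blob)
            else:
                groups.append(current_group)    # close the group, start a new one
                current_group = [blob]
                current_total = len(blob)
        if current_group:
            groups.append(current_group)
        return groups, oversized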
@staticmethod
def _build_control_mapping(parameters):
control_mapping = collections.OrderedDict()
scalar_parameters = []
trigger_parameters = []
audio_parameters = []
control_parameters = []
mapping = {
ParameterRate.AUDIO: audio_parameters,
ParameterRate.CONTROL: control_parameters,
ParameterRate.SCALAR: scalar_parameters,
ParameterRate.TRIGGER: trigger_parameters,
}
for parameter in parameters:
mapping[parameter.parameter_rate].append(parameter)
for filtered_parameters in mapping.values():
filtered_parameters.sort(key=lambda x: x.name)
control_ugens = []
indexed_parameters = []
starting_control_index = 0
if scalar_parameters:
control = Control(
parameters=scalar_parameters,
calculation_rate=CalculationRate.SCALAR,
starting_control_index=starting_control_index,
)
control_ugens.append(control)
for parameter in scalar_parameters:
indexed_parameters.append((starting_control_index, parameter))
starting_control_index += len(parameter)
for i, output_proxy in enumerate(control._get_parameter_output_proxies()):
control_mapping[output_proxy] = control[i]
if trigger_parameters:
control = TrigControl(
parameters=trigger_parameters,
starting_control_index=starting_control_index,
)
control_ugens.append(control)
for parameter in trigger_parameters:
indexed_parameters.append((starting_control_index, parameter))
starting_control_index += len(parameter)
for i, output_proxy in enumerate(control._get_parameter_output_proxies()):
control_mapping[output_proxy] = control[i]
if audio_parameters:
control = AudioControl(
parameters=audio_parameters,
starting_control_index=starting_control_index,
)
control_ugens.append(control)
for parameter in audio_parameters:
indexed_parameters.append((starting_control_index, parameter))
starting_control_index += len(parameter)
for i, output_proxy in enumerate(control._get_parameter_output_proxies()):
control_mapping[output_proxy] = control[i]
if control_parameters:
if any(_.lag for _ in control_parameters):
control = LagControl(
parameters=control_parameters,
calculation_rate=CalculationRate.CONTROL,
starting_control_index=starting_control_index,
)
else:
control = Control(
parameters=control_parameters,
calculation_rate=CalculationRate.CONTROL,
starting_control_index=starting_control_index,
)
control_ugens.append(control)
for parameter in control_parameters:
indexed_parameters.append((starting_control_index, parameter))
starting_control_index += len(parameter)
for i, output_proxy in enumerate(control._get_parameter_output_proxies()):
control_mapping[output_proxy] = control[i]
control_ugens = tuple(control_ugens)
indexed_parameters.sort(key=lambda pair: parameters.index(pair[1]))
indexed_parameters = tuple(indexed_parameters)
return control_ugens, control_mapping, indexed_parameters
@staticmethod
def _build_input_mapping(ugens):
import supriya.ugens
input_mapping = {}
for ugen in ugens:
if not isinstance(ugen, supriya.ugens.PV_ChainUGen):
continue
if isinstance(ugen, supriya.ugens.PV_Copy):
continue
for i, input_ in enumerate(ugen.inputs):
if not isinstance(input_, OutputProxy):
continue
source = input_.source
if not isinstance(source, supriya.ugens.PV_ChainUGen):
continue
if source not in input_mapping:
input_mapping[source] = []
input_mapping[source].append((ugen, i))
return input_mapping
@staticmethod
def _cleanup_local_bufs(ugens):
import supriya.ugens
local_bufs = []
processed_ugens = []
for ugen in ugens:
if isinstance(ugen, supriya.ugens.MaxLocalBufs):
continue
if isinstance(ugen, supriya.ugens.LocalBuf):
local_bufs.append(ugen)
processed_ugens.append(ugen)
if local_bufs:
max_local_bufs = supriya.ugens.MaxLocalBufs(len(local_bufs))
for local_buf in local_bufs:
inputs = list(local_buf.inputs[:2])
inputs.append(max_local_bufs[0])
local_buf._inputs = tuple(inputs)
index = processed_ugens.index(local_bufs[0])
processed_ugens[index:index] = [max_local_bufs]
return tuple(processed_ugens)
@staticmethod
def _cleanup_pv_chains(ugens):
import supriya.ugens
input_mapping = SynthDef._build_input_mapping(ugens)
for antecedent, descendants in input_mapping.items():
if len(descendants) == 1:
continue
for descendant, input_index in descendants[:-1]:
fft_size = antecedent.fft_size
new_buffer = supriya.ugens.LocalBuf(fft_size)
pv_copy = supriya.ugens.PV_Copy(antecedent, new_buffer)
inputs = list(descendant._inputs)
inputs[input_index] = pv_copy[0]
descendant._inputs = tuple(inputs)
index = ugens.index(descendant)
replacement = []
if isinstance(fft_size, UGenMethodMixin):
replacement.append(fft_size)
replacement.extend([new_buffer, pv_copy])
ugens[index:index] = replacement
return ugens
@staticmethod
def _collect_constants(ugens):
constants = []
for ugen in ugens:
for input_ in ugen._inputs:
if not isinstance(input_, float):
continue
if input_ not in constants:
constants.append(input_)
return tuple(constants)
@staticmethod
def _collect_control_ugens(ugens):
control_ugens = tuple(_ for _ in ugens if isinstance(_, Control))
return control_ugens
@staticmethod
def _collect_indexed_parameters(control_ugens, parameter_names=None):
indexed_parameters = []
parameters = {}
for control_ugen in control_ugens:
index = control_ugen.starting_control_index
for parameter in control_ugen.parameters:
parameters[parameter.name] = (index, parameter)
index += len(parameter)
parameter_names = parameter_names or sorted(parameters)
for parameter_name in parameter_names:
indexed_parameters.append(parameters[parameter_name])
indexed_parameters = tuple(indexed_parameters)
return indexed_parameters
@staticmethod
def _extract_parameters(ugens):
parameters = set()
for ugen in ugens:
if isinstance(ugen, Parameter):
parameters.add(ugen)
ugens = tuple(ugen for ugen in ugens if ugen not in parameters)
parameters = tuple(sorted(parameters, key=lambda x: x.name))
return ugens, parameters
@staticmethod
def _initialize_topological_sort(ugens):
ugens = list(ugens)
sort_bundles = collections.OrderedDict()
width_first_antecedents = []
for ugen in ugens:
sort_bundles[ugen] = UGenSortBundle(ugen, width_first_antecedents)
if isinstance(ugen, WidthFirstUGen):
width_first_antecedents.append(ugen)
for ugen in ugens:
sort_bundle = sort_bundles[ugen]
sort_bundle._initialize_topological_sort(sort_bundles)
            sort_bundle.descendants[:] = sorted(
                sort_bundles[ugen].descendants, key=lambda x: ugens.index(x)
            )
return sort_bundles
@staticmethod
def _optimize_ugen_graph(ugens):
sort_bundles = SynthDef._initialize_topological_sort(ugens)
for ugen in ugens:
ugen._optimize_graph(sort_bundles)
return tuple(sort_bundles)
def _register_with_local_server(self, server):
synthdef_name = self.actual_name
server._synthdefs[synthdef_name] = self
@staticmethod
def _remap_controls(ugens, control_mapping):
for ugen in ugens:
inputs = list(ugen.inputs)
for i, input_ in enumerate(inputs):
if input_ in control_mapping:
output_proxy = control_mapping[input_]
inputs[i] = output_proxy
ugen._inputs = tuple(inputs)
@staticmethod
def _sort_ugens_topologically(ugens):
sort_bundles = SynthDef._initialize_topological_sort(ugens)
available_ugens = []
for ugen in reversed(ugens):
sort_bundles[ugen]._make_available(available_ugens)
out_stack = []
while available_ugens:
available_ugen = available_ugens.pop()
sort_bundles[available_ugen]._schedule(
available_ugens, out_stack, sort_bundles
)
return out_stack
### PUBLIC METHODS ###
def allocate(self, server):
self._allocate_synthdefs((self,), server)
return self
def compile(self, use_anonymous_name=False):
from supriya.synthdefs import SynthDefCompiler
synthdefs = [self]
result = SynthDefCompiler.compile_synthdefs(
synthdefs, use_anonymous_names=use_anonymous_name
)
return result
def free(self, server):
import supriya.commands
assert self in server
synthdef_name = self.actual_name
del server._synthdefs[synthdef_name]
request = supriya.commands.SynthDefFreeRequest(synthdef=self)
if server.is_running:
request.communicate(server=server)
def to_dict(self):
"""
Convert SynthDef to JSON-serializable dictionary.
::
>>> import json
>>> result = supriya.assets.synthdefs.default.to_dict()
>>> result = json.dumps(
... result,
... indent=4,
... separators=(",", ": "),
... sort_keys=True,
... )
>>> print(result)
{
"synthdef": {
"hash": "da0982184cc8fa54cf9d288a0fe1f6ca",
"name": "default",
"parameters": {
"amplitude": {
"range": [
0,
1
],
"rate": "control",
"unit": null,
"value": 0.1
},
"frequency": {
"range": [
0,
1
],
"rate": "control",
"unit": null,
"value": 440.0
},
"gate": {
"range": [
0,
1
],
"rate": "control",
"unit": null,
"value": 1.0
},
"out": {
"range": [
0,
1
],
"rate": "scalar",
"unit": null,
"value": 0.0
},
"pan": {
"range": [
0,
1
],
"rate": "control",
"unit": null,
"value": 0.5
}
}
}
}
"""
result = {
"name": self.actual_name,
"hash": self.anonymous_name,
"parameters": {},
}
for parameter_name, parameter in self.parameters.items():
range_ = [0, 1]
if parameter.range_:
range_ = [parameter.range_.minimum, parameter.range_.maximum]
rate = parameter.parameter_rate.name.lower()
result["parameters"][parameter_name] = {
"rate": rate,
"range": range_,
"unit": parameter.unit,
"value": parameter.value,
}
result = {"synthdef": result}
return result
### PUBLIC PROPERTIES ###
@property
def actual_name(self):
return self.name or self.anonymous_name
@property
def anonymous_name(self):
md5 = hashlib.md5()
md5.update(self._compiled_ugen_graph)
anonymous_name = md5.hexdigest()
return anonymous_name
@property
def audio_channel_count(self):
return max(self.audio_input_channel_count, self.audio_output_channel_count)
@property
def audio_input_channel_count(self):
"""
Gets audio input channel count of synthdef.
::
>>> with supriya.SynthDefBuilder() as builder:
... audio_in = supriya.ugens.In.ar(channel_count=1)
... control_in = supriya.ugens.In.kr(channel_count=2)
... sin = supriya.ugens.SinOsc.ar(
... frequency=audio_in,
... )
... source = audio_in * control_in[1]
... audio_out = supriya.ugens.Out.ar(source=[source] * 4)
...
>>> synthdef = builder.build()
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> synthdef.audio_input_channel_count
1
Returns integer.
"""
ugens = tuple(
_ for _ in self.input_ugens if _.calculation_rate == CalculationRate.AUDIO
)
if len(ugens) == 1:
return ugens[0].channel_count
elif not ugens:
return 0
raise ValueError
@property
def audio_output_channel_count(self):
"""
Gets audio output channel count of synthdef.
::
>>> with supriya.SynthDefBuilder() as builder:
... audio_in = supriya.ugens.In.ar(channel_count=1)
... control_in = supriya.ugens.In.kr(channel_count=2)
... sin = supriya.ugens.SinOsc.ar(
... frequency=audio_in,
... )
... source = audio_in * control_in[1]
... audio_out = supriya.ugens.Out.ar(source=[source] * 4)
...
>>> synthdef = builder.build()
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> synthdef.audio_output_channel_count
4
Returns integer.
"""
ugens = tuple(
_ for _ in self.output_ugens if _.calculation_rate == CalculationRate.AUDIO
)
if len(ugens) == 1:
return len(ugens[0].source)
elif not ugens:
return 0
raise ValueError
@property
def constants(self):
return self._constants
@property
def control_ugens(self):
return self._control_ugens
@property
def control_channel_count(self):
return max(self.control_input_channel_count, self.control_output_channel_count)
@property
def control_input_channel_count(self):
"""
Gets control input channel count of synthdef.
::
>>> with supriya.SynthDefBuilder() as builder:
... audio_in = supriya.ugens.In.ar(channel_count=1)
... control_in = supriya.ugens.In.kr(channel_count=2)
... sin = supriya.ugens.SinOsc.ar(
... frequency=audio_in,
... )
... source = audio_in * control_in[1]
... audio_out = supriya.ugens.Out.ar(source=[source] * 4)
...
>>> synthdef = builder.build()
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> synthdef.control_input_channel_count
2
Returns integer.
"""
ugens = tuple(
_ for _ in self.input_ugens if _.calculation_rate == CalculationRate.CONTROL
)
if len(ugens) == 1:
return ugens[0].channel_count
elif not ugens:
return 0
raise ValueError
@property
def control_output_channel_count(self):
"""
Gets control output channel count of synthdef.
::
>>> with supriya.SynthDefBuilder() as builder:
... audio_in = supriya.ugens.In.ar(channel_count=1)
... control_in = supriya.ugens.In.kr(channel_count=2)
... sin = supriya.ugens.SinOsc.ar(
... frequency=audio_in,
... )
... source = audio_in * control_in[1]
... audio_out = supriya.ugens.Out.ar(source=[source] * 4)
...
>>> synthdef = builder.build()
::
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> synthdef.control_output_channel_count
0
Returns integer.
"""
ugens = tuple(
_
for _ in self.output_ugens
if _.calculation_rate == CalculationRate.CONTROL
)
if len(ugens) == 1:
return len(ugens[0].source)
elif not ugens:
return 0
raise ValueError
@property
def done_actions(self):
done_actions = set()
for ugen in self.ugens:
done_action = ugen._get_done_action()
if done_action is not None:
done_actions.add(done_action)
return sorted(done_actions)
@property
def has_gate(self):
return "gate" in self.parameter_names
@property
def indexed_parameters(self):
return self._indexed_parameters
@property
def input_ugens(self):
return tuple(_ for _ in self.ugens if _.is_input_ugen)
@property
def is_allocated(self):
if self.server is not None:
return self in self.server
return False
@property
def name(self):
return self._name
@property
def output_ugens(self):
return tuple(_ for _ in self.ugens if _.is_output_ugen)
@property
def parameters(self):
return {
parameter.name: parameter for index, parameter in self.indexed_parameters
}
@property
def parameter_names(self):
return [parameter.name for index, parameter in self.indexed_parameters]
@property
def ugens(self):
return self._ugens
class UGenSortBundle(SupriyaObject):
### INITIALIZER ###
def __init__(self, ugen, width_first_antecedents):
self.antecedents = []
self.descendants = []
self.ugen = ugen
self.width_first_antecedents = tuple(width_first_antecedents)
### PRIVATE METHODS ###
def _initialize_topological_sort(self, sort_bundles):
for input_ in self.ugen.inputs:
if isinstance(input_, OutputProxy):
input_ = input_.source
elif not isinstance(input_, UGen):
continue
input_sort_bundle = sort_bundles[input_]
if input_ not in self.antecedents:
self.antecedents.append(input_)
if self.ugen not in input_sort_bundle.descendants:
input_sort_bundle.descendants.append(self.ugen)
for input_ in self.width_first_antecedents:
input_sort_bundle = sort_bundles[input_]
if input_ not in self.antecedents:
self.antecedents.append(input_)
if self.ugen not in input_sort_bundle.descendants:
input_sort_bundle.descendants.append(self.ugen)
def _make_available(self, available_ugens):
if not self.antecedents:
if self.ugen not in available_ugens:
available_ugens.append(self.ugen)
def _schedule(self, available_ugens, out_stack, sort_bundles):
for ugen in reversed(self.descendants):
sort_bundle = sort_bundles[ugen]
sort_bundle.antecedents.remove(self.ugen)
sort_bundle._make_available(available_ugens)
out_stack.append(self.ugen)
### PUBLIC METHODS ###
def clear(self):
self.antecedents[:] = []
self.descendants[:] = []
self.width_first_antecedents[:] = []
class SuperColliderSynthDef(SupriyaObject):
### CLASS VARIABLES ###
__slots__ = ("_body", "_name", "_rates")
### INITIALIZER ###
def __init__(self, name, body, rates=None):
self._name = name
self._body = body
self._rates = rates
### PRIVATE METHODS ###
def _build_sc_input(self, directory_path):
input_ = []
input_.append("a = SynthDef(")
input_.append(" \\{}, {{".format(self.name))
for line in self.body.splitlines():
input_.append(" " + line)
if self.rates:
input_.append("}}, {});".format(self.rates))
else:
input_.append("});")
input_.append('"Defined SynthDef".postln;')
input_.append('a.writeDefFile("{}");'.format(directory_path))
input_.append('"Wrote SynthDef".postln;')
input_.append("0.exit;")
input_ = "\n".join(input_)
return input_
### PUBLIC METHODS ###
def compile(self):
sclang_path = sclang.find()
with tempfile.TemporaryDirectory() as directory:
directory_path = pathlib.Path(directory)
sc_input = self._build_sc_input(directory_path)
print(sc_input)
sc_file_path = directory_path / f"{self.name}.sc"
sc_file_path.write_text(sc_input)
command = " ".join([str(sclang_path), "-D", str(sc_file_path)])
print(command)
subprocess.run(command, shell=True)
result = (directory_path / f"{self.name}.scsyndef").read_bytes()
return bytes(result)
### PUBLIC PROPERTIES ###
@property
def body(self):
return self._body
@property
def rates(self):
return self._rates
@property
def name(self):
return self._name
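# Usage sketch (hypothetical, for illustration): compiling a raw sclang
# SynthDef body through this wrapper. The name and body below are made up,
# and compile() shells out to sclang, so a working sclang install that
# sclang.find() can locate is assumed.
#
#     sc_synthdef = SuperColliderSynthDef(
#         "example",
#         "arg frequency = 440, amplitude = 0.1; "
#         "Out.ar(0, SinOsc.ar(frequency) * amplitude);",
#     )
#     compiled = sc_synthdef.compile()  # bytes of the resulting .scsyndef file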
|
|
###################################################
# ldp python module (ldp.py)
# use: import ldp (in your script)
# A set of functions to make it easier to interface
# with Embedded Adventures' 80x8 led matrix LDP-8008
# By Pete Goss 14/1/2014
###################################################
# connect Raspberry Pi GPIO to J1 on LDP-8008
###################################################
# GPIO pin LDP-8008 pin
# 3 ------------> 2 A (Row address)
# 5 ------------> 4 B (Row address)
# 6 ------------> 5 GND
# 7 ------------> 6 C (Row address)
# 8 ------------> 7 EN (Enable Display)
# 10 ------------> 8 D (Row address)
# 11 ------------> 9 \R1 (Red Led)
# 12 ------------> 10 \G1 (Green Led)
# 13 ------------> 14 L (Latch)
# 15 ------------> 16 S (Shift)
###################################################
import RPi.GPIO as gpio
gpio.setwarnings(False)
gpio.setmode(gpio.BOARD)
###################################################
# give the gpio pins labels that match the LDP-8008
###################################################
R1=11
G1=12
EN=8
A=3
B=5
C=7
D=10
L=13
S=15
####################################
# init function
# usage: ldp.init()
# function initialises the LDP-8008
# use once at beginning of script
####################################
def init():
# set GPIO pins as outputs
gpio.setup(R1,gpio.OUT)
gpio.setup(G1,gpio.OUT)
gpio.setup(EN,gpio.OUT)
gpio.setup(A,gpio.OUT)
gpio.setup(B,gpio.OUT)
gpio.setup(C,gpio.OUT)
gpio.setup(D,gpio.OUT)
gpio.setup(L,gpio.OUT)
gpio.setup(S,gpio.OUT)
#initialise the output pins
gpio.output(R1,1)
gpio.output(G1,1)
gpio.output(S,1)
gpio.output(L,0)
gpio.output(EN,0)
clear()
####################################
# end init function
####################################
####################################
# clear function
# usage: ldp.clear()
# function sets the shift register
# bits to blank and turns off display
####################################
def clear():
gpio.output(R1,1)
gpio.output(G1,1)
for i in range(80):
gpio.output(S,1)
gpio.output(S,0)
gpio.output(S,1)
displayoff()
####################################
# end clear function
####################################
####################################
# shift function
# usage: ldp.shift()
# function shifts the current led colour
# into the first column of the register
####################################
def shift():
gpio.output(S,1)
gpio.output(S,0)
gpio.output(S,1)
####################################
# end shift function
####################################
####################################
# colour function
# usage: ldp.colour(colour_value)
# sets the current led colour
# 0=blank 1=red 2=green 3=orange
####################################
def colour(n):
if n == 3: #orange
gpio.output(R1,0)
gpio.output(G1,0)
elif n == 2: #green
gpio.output(R1,1)
gpio.output(G1,0)
elif n == 1: #red
gpio.output(R1,0)
gpio.output(G1,1)
else: # off
gpio.output(R1,1)
gpio.output(G1,1)
####################################
# end colour function
####################################
####################################
# colourshift function
# usage: ldp.colourshift(colour_value)
# sets the current led colour
# and also shifts it into the register
# 0=blank 1=red 2=green 3=orange
####################################
def colourshift(n):
if n == 3: #orange
gpio.output(R1,0)
gpio.output(G1,0)
elif n == 2: #green
gpio.output(R1,1)
gpio.output(G1,0)
elif n == 1: #red
gpio.output(R1,0)
gpio.output(G1,1)
else: # off
gpio.output(R1,1)
gpio.output(G1,1)
gpio.output(S,1)
gpio.output(S,0)
gpio.output(S,1)
####################################
# end colourshift function
####################################
####################################
# showrow function
# usage: ldp.showrow(row_value)
# displays the register on a row
# row_value = 0-7
####################################
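# the row select lines form the binary value of n as D C B A, with A as the
# least significant bit (e.g. n=5 -> D=0 C=1 B=0 A=1)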
def showrow(n):
if n == 7:
gpio.output(A,1)
gpio.output(B,1)
gpio.output(C,1)
gpio.output(D,0)
elif n == 6:
gpio.output(A,0)
gpio.output(B,1)
gpio.output(C,1)
gpio.output(D,0)
elif n == 5:
gpio.output(A,1)
gpio.output(B,0)
gpio.output(C,1)
gpio.output(D,0)
elif n == 4:
gpio.output(A,0)
gpio.output(B,0)
gpio.output(C,1)
gpio.output(D,0)
elif n == 3:
gpio.output(A,1)
gpio.output(B,1)
gpio.output(C,0)
gpio.output(D,0)
elif n == 2:
gpio.output(A,0)
gpio.output(B,1)
gpio.output(C,0)
gpio.output(D,0)
elif n == 1:
gpio.output(A,1)
gpio.output(B,0)
gpio.output(C,0)
gpio.output(D,0)
else:
gpio.output(A,0)
gpio.output(B,0)
gpio.output(C,0)
gpio.output(D,0)
# latch the data
gpio.output(L,1)
gpio.output(L,0)
# display the row
gpio.output(EN,1)
####################################
# end showrow function
####################################
####################################
# displayoff function
# usage: ldp.displayoff()
# turns off the display
####################################
def displayoff():
gpio.output(EN,0)
####################################
# end displayoff function
####################################
####################################
# displayon function
# usage: ldp.displayon()
# turns on the display
####################################
def displayon():
gpio.output(EN,1)
####################################
# end displayon function
####################################
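####################################
# usage sketch
# a minimal, hypothetical example of
# driving the panel with this module;
# it assumes the wiring shown above
# and only runs when the file is
# executed directly, not on import
####################################
if __name__ == "__main__":
    import time
    init()                # set up the GPIO pins and blank the display
    colour(1)             # select red for the shift register input
    for i in range(80):   # fill all 80 columns with the current colour
        shift()
    showrow(0)            # latch the register and light row 0
    time.sleep(5)         # leave the row lit for five seconds
    clear()               # blank the register and turn the display off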
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from collections import defaultdict
from collections import OrderedDict
import six
from oslo_config import cfg
import st2common
from st2common import log as logging
from st2common.bootstrap.triggersregistrar import TriggersRegistrar
from st2common.bootstrap.sensorsregistrar import SensorsRegistrar
from st2common.bootstrap.actionsregistrar import ActionsRegistrar
from st2common.bootstrap.aliasesregistrar import AliasesRegistrar
from st2common.bootstrap.policiesregistrar import PolicyRegistrar
import st2common.bootstrap.policiesregistrar as policies_registrar
import st2common.bootstrap.runnersregistrar as runners_registrar
from st2common.bootstrap.rulesregistrar import RulesRegistrar
import st2common.bootstrap.ruletypesregistrar as rule_types_registrar
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
import st2common.content.utils as content_utils
from st2common.models.db.auth import UserDB
from st2common.models.api.action import LiveActionCreateAPI
from st2common.models.api.pack import PackAPI
from st2common.models.api.pack import PackAsyncAPI
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.persistence.pack import Pack
from st2common.rbac.types import PermissionType
from st2common.rbac.backends import get_rbac_backend
from st2common.services import packs as packs_service
from st2common.router import abort
from st2common.router import Response
from st2api.controllers.resource import ResourceController
from st2api.controllers.v1.actionexecutions import ActionExecutionsControllerMixin
http_client = six.moves.http_client
__all__ = ["PacksController", "BasePacksController", "ENTITIES"]
LOG = logging.getLogger(__name__)
# Note: The order in which these are defined is important, so that they are
# registered in the same order as they are in st2-register-content.
# We also need to use a list of tuples to preserve the order.
ENTITIES = OrderedDict(
[
("trigger", (TriggersRegistrar, "triggers")),
("sensor", (SensorsRegistrar, "sensors")),
("action", (ActionsRegistrar, "actions")),
("rule", (RulesRegistrar, "rules")),
("alias", (AliasesRegistrar, "aliases")),
("policy", (PolicyRegistrar, "policies")),
("config", (ConfigsRegistrar, "configs")),
]
)
def _get_proxy_config():
LOG.debug("Loading proxy configuration from env variables %s.", os.environ)
http_proxy = os.environ.get("http_proxy", None)
https_proxy = os.environ.get("https_proxy", None)
no_proxy = os.environ.get("no_proxy", None)
proxy_ca_bundle_path = os.environ.get("proxy_ca_bundle_path", None)
proxy_config = {
"http_proxy": http_proxy,
"https_proxy": https_proxy,
"proxy_ca_bundle_path": proxy_ca_bundle_path,
"no_proxy": no_proxy,
}
LOG.debug("Proxy configuration: %s", proxy_config)
return proxy_config
class PackInstallController(ActionExecutionsControllerMixin):
def post(self, pack_install_request, requester_user=None):
parameters = {
"packs": pack_install_request.packs,
}
if pack_install_request.force:
parameters["force"] = True
if pack_install_request.skip_dependencies:
parameters["skip_dependencies"] = True
if not requester_user:
requester_user = UserDB(name=cfg.CONF.system_user.user)
new_liveaction_api = LiveActionCreateAPI(
action="packs.install", parameters=parameters, user=requester_user.name
)
execution_resp = self._handle_schedule_execution(
liveaction_api=new_liveaction_api, requester_user=requester_user
)
exec_id = PackAsyncAPI(execution_id=execution_resp.json["id"])
return Response(json=exec_id, status=http_client.ACCEPTED)
class PackUninstallController(ActionExecutionsControllerMixin):
def post(self, pack_uninstall_request, ref_or_id=None, requester_user=None):
if ref_or_id:
parameters = {"packs": [ref_or_id]}
else:
parameters = {"packs": pack_uninstall_request.packs}
if not requester_user:
requester_user = UserDB(name=cfg.CONF.system_user.user)
new_liveaction_api = LiveActionCreateAPI(
action="packs.uninstall", parameters=parameters, user=requester_user.name
)
execution_resp = self._handle_schedule_execution(
liveaction_api=new_liveaction_api, requester_user=requester_user
)
exec_id = PackAsyncAPI(execution_id=execution_resp.json["id"])
return Response(json=exec_id, status=http_client.ACCEPTED)
class PackRegisterController(object):
CONTENT_TYPES = [
"runner",
"action",
"trigger",
"sensor",
"rule",
"rule_type",
"alias",
"policy_type",
"policy",
"config",
]
def post(self, pack_register_request):
if pack_register_request and hasattr(pack_register_request, "types"):
types = pack_register_request.types
if "all" in types:
types = PackRegisterController.CONTENT_TYPES
else:
types = PackRegisterController.CONTENT_TYPES
if pack_register_request and hasattr(pack_register_request, "packs"):
packs = list(set(pack_register_request.packs))
else:
packs = None
result = defaultdict(int)
# Register depended-upon resources (actions depend on runners, rules depend on rule types, etc.)
if ("runner" in types or "runners" in types) or (
"action" in types or "actions" in types
):
result["runners"] = runners_registrar.register_runners(experimental=True)
if ("rule_type" in types or "rule_types" in types) or (
"rule" in types or "rules" in types
):
result["rule_types"] = rule_types_registrar.register_rule_types()
if ("policy_type" in types or "policy_types" in types) or (
"policy" in types or "policies" in types
):
result["policy_types"] = policies_registrar.register_policy_types(st2common)
use_pack_cache = False
# TODO: Since this operation is mostly IO bound, we could speed it up by using a
# green thread pool here and registering different resources concurrently.
fail_on_failure = getattr(pack_register_request, "fail_on_failure", True)
for type, (Registrar, name) in six.iteritems(ENTITIES):
if type in types or name in types:
registrar = Registrar(
use_pack_cache=use_pack_cache,
use_runners_cache=True,
fail_on_failure=fail_on_failure,
)
if packs:
for pack in packs:
pack_path = content_utils.get_pack_base_path(pack)
try:
registered_count = registrar.register_from_pack(
pack_dir=pack_path
)
result[name] += registered_count
except ValueError as e:
# Throw a more user-friendly exception if the requested pack doesn't exist
if re.match(
'Directory ".*?" doesn\'t exist', six.text_type(e)
):
msg = 'Pack "%s" not found on disk: %s' % (
pack,
six.text_type(e),
)
raise ValueError(msg)
raise e
else:
packs_base_paths = content_utils.get_packs_base_paths()
registered_count = registrar.register_from_packs(
base_dirs=packs_base_paths
)
result[name] += registered_count
return result
class PackSearchController(object):
def post(self, pack_search_request):
proxy_config = _get_proxy_config()
if hasattr(pack_search_request, "query"):
packs = packs_service.search_pack_index(
pack_search_request.query,
case_sensitive=False,
proxy_config=proxy_config,
)
return [PackAPI(**pack) for pack in packs]
else:
pack = packs_service.get_pack_from_index(
pack_search_request.pack, proxy_config=proxy_config
)
return PackAPI(**pack) if pack else []
class IndexHealthController(object):
def get(self):
"""
Check if all listed indexes are healthy: they should be reachable,
return valid JSON objects, and yield more than one result.
"""
proxy_config = _get_proxy_config()
_, status = packs_service.fetch_pack_index(
allow_empty=True, proxy_config=proxy_config
)
health = {
"indexes": {
"count": len(status),
"valid": 0,
"invalid": 0,
"errors": {},
"status": status,
},
"packs": {
"count": 0,
},
}
for index in status:
if index["error"]:
error_count = health["indexes"]["errors"].get(index["error"], 0) + 1
health["indexes"]["invalid"] += 1
health["indexes"]["errors"][index["error"]] = error_count
else:
health["indexes"]["valid"] += 1
health["packs"]["count"] += index["packs"]
return health
class BasePacksController(ResourceController):
model = PackAPI
access = Pack
def _get_one_by_ref_or_id(self, ref_or_id, requester_user, exclude_fields=None):
instance = self._get_by_ref_or_id(
ref_or_id=ref_or_id, exclude_fields=exclude_fields
)
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_db_permission(
user_db=requester_user,
resource_db=instance,
permission_type=PermissionType.PACK_VIEW,
)
if not instance:
msg = 'Unable to identify resource with ref_or_id "%s".' % (ref_or_id)
abort(http_client.NOT_FOUND, msg)
return
result = self.model.from_model(instance, **self.from_model_kwargs)
return result
def _get_by_ref_or_id(self, ref_or_id, exclude_fields=None):
resource_db = self._get_by_id(
resource_id=ref_or_id, exclude_fields=exclude_fields
)
if not resource_db:
# Try ref
resource_db = self._get_by_ref(ref=ref_or_id, exclude_fields=exclude_fields)
if not resource_db:
msg = 'Resource with a ref or id "%s" not found' % (ref_or_id)
raise StackStormDBObjectNotFoundError(msg)
return resource_db
def _get_by_ref(self, ref, exclude_fields=None):
"""
Note: In this case "ref" is pack name and not StackStorm's ResourceReference.
"""
resource_db = self.access.query(ref=ref, exclude_fields=exclude_fields).first()
return resource_db
class PacksIndexController:
search = PackSearchController()
health = IndexHealthController()
def get_all(self):
proxy_config = _get_proxy_config()
index, status = packs_service.fetch_pack_index(proxy_config=proxy_config)
return {"status": status, "index": index}
class PacksController(BasePacksController):
from st2api.controllers.v1.pack_views import PackViewsController
model = PackAPI
access = Pack
supported_filters = {"name": "name", "ref": "ref"}
query_options = {"sort": ["ref"]}
# Nested controllers
install = PackInstallController()
uninstall = PackUninstallController()
register = PackRegisterController()
views = PackViewsController()
index = PacksIndexController()
def __init__(self):
super(PacksController, self).__init__()
self.get_one_db_method = self._get_by_ref_or_id
def get_all(
self,
exclude_attributes=None,
include_attributes=None,
sort=None,
offset=0,
limit=None,
requester_user=None,
**raw_filters,
):
return super(PacksController, self)._get_all(
exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user,
)
def get_one(self, ref_or_id, requester_user):
return self._get_one_by_ref_or_id(
ref_or_id=ref_or_id, requester_user=requester_user
)
packs_controller = PacksController()
|